path: root/Lib/fontTools/ttLib
Diffstat (limited to 'Lib/fontTools/ttLib')
-rw-r--r--  Lib/fontTools/ttLib/__init__.py | 15
-rw-r--r--  Lib/fontTools/ttLib/__main__.py | 108
-rw-r--r--  Lib/fontTools/ttLib/macUtils.py | 80
-rw-r--r--  Lib/fontTools/ttLib/scaleUpem.py | 86
-rw-r--r--  Lib/fontTools/ttLib/sfnt.py | 1007
-rw-r--r--  Lib/fontTools/ttLib/standardGlyphOrder.py | 520
-rw-r--r--  Lib/fontTools/ttLib/tables/B_A_S_E_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py | 49
-rw-r--r--  Lib/fontTools/ttLib/tables/C_B_D_T_.py | 117
-rw-r--r--  Lib/fontTools/ttLib/tables/C_B_L_C_.py | 4
-rw-r--r--  Lib/fontTools/ttLib/tables/C_F_F_.py | 80
-rw-r--r--  Lib/fontTools/ttLib/tables/C_F_F__2.py | 1
-rw-r--r--  Lib/fontTools/ttLib/tables/C_O_L_R_.py | 294
-rw-r--r--  Lib/fontTools/ttLib/tables/C_P_A_L_.py | 492
-rw-r--r--  Lib/fontTools/ttLib/tables/D_S_I_G_.py | 176
-rw-r--r--  Lib/fontTools/ttLib/tables/D__e_b_g.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/DefaultTable.py | 89
-rw-r--r--  Lib/fontTools/ttLib/tables/E_B_D_T_.py | 1375
-rw-r--r--  Lib/fontTools/ttLib/tables/E_B_L_C_.py | 1139
-rw-r--r--  Lib/fontTools/ttLib/tables/F_F_T_M_.py | 48
-rw-r--r--  Lib/fontTools/ttLib/tables/F__e_a_t.py | 97
-rw-r--r--  Lib/fontTools/ttLib/tables/G_D_E_F_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/G_M_A_P_.py | 191
-rw-r--r--  Lib/fontTools/ttLib/tables/G_P_K_G_.py | 195
-rw-r--r--  Lib/fontTools/ttLib/tables/G_P_O_S_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/G_S_U_B_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/G__l_a_t.py | 128
-rw-r--r--  Lib/fontTools/ttLib/tables/G__l_o_c.py | 51
-rw-r--r--  Lib/fontTools/ttLib/tables/H_V_A_R_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/J_S_T_F_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/L_T_S_H_.py | 70
-rw-r--r--  Lib/fontTools/ttLib/tables/M_A_T_H_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/M_E_T_A_.py | 514
-rw-r--r--  Lib/fontTools/ttLib/tables/M_V_A_R_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/O_S_2f_2.py | 933
-rw-r--r--  Lib/fontTools/ttLib/tables/S_I_N_G_.py | 127
-rw-r--r--  Lib/fontTools/ttLib/tables/S_V_G_.py | 290
-rw-r--r--  Lib/fontTools/ttLib/tables/S__i_l_f.py | 672
-rw-r--r--  Lib/fontTools/ttLib/tables/S__i_l_l.py | 57
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I_B_.py | 3
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I_D_.py | 3
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I_J_.py | 3
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I_P_.py | 3
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I_S_.py | 3
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I_V_.py | 30
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I__0.py | 80
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I__1.py | 279
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I__2.py | 4
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I__3.py | 11
-rw-r--r--  Lib/fontTools/ttLib/tables/T_S_I__5.py | 59
-rw-r--r--  Lib/fontTools/ttLib/tables/T_T_F_A_.py | 3
-rw-r--r--  Lib/fontTools/ttLib/tables/TupleVariation.py | 1500
-rw-r--r--  Lib/fontTools/ttLib/tables/V_D_M_X_.py | 385
-rw-r--r--  Lib/fontTools/ttLib/tables/V_O_R_G_.py | 280
-rw-r--r--  Lib/fontTools/ttLib/tables/V_V_A_R_.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/__init__.py | 181
-rw-r--r--  Lib/fontTools/ttLib/tables/_a_n_k_r.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/_a_v_a_r.py | 104
-rw-r--r--  Lib/fontTools/ttLib/tables/_c_i_d_g.py | 20
-rw-r--r--  Lib/fontTools/ttLib/tables/_c_m_a_p.py | 2825
-rw-r--r--  Lib/fontTools/ttLib/tables/_c_v_a_r.py | 30
-rw-r--r--  Lib/fontTools/ttLib/tables/_c_v_t.py | 80
-rw-r--r--  Lib/fontTools/ttLib/tables/_f_e_a_t.py | 15
-rw-r--r--  Lib/fontTools/ttLib/tables/_f_p_g_m.py | 83
-rw-r--r--  Lib/fontTools/ttLib/tables/_f_v_a_r.py | 83
-rw-r--r--  Lib/fontTools/ttLib/tables/_g_a_s_p.py | 76
-rw-r--r--  Lib/fontTools/ttLib/tables/_g_l_y_f.py | 4350
-rw-r--r--  Lib/fontTools/ttLib/tables/_g_v_a_r.py | 450
-rw-r--r--  Lib/fontTools/ttLib/tables/_h_d_m_x.py | 185
-rw-r--r--  Lib/fontTools/ttLib/tables/_h_e_a_d.py | 166
-rw-r--r--  Lib/fontTools/ttLib/tables/_h_h_e_a.py | 198
-rw-r--r--  Lib/fontTools/ttLib/tables/_h_m_t_x.py | 241
-rw-r--r--  Lib/fontTools/ttLib/tables/_k_e_r_n.py | 520
-rw-r--r--  Lib/fontTools/ttLib/tables/_l_c_a_r.py | 2
-rw-r--r--  Lib/fontTools/ttLib/tables/_l_o_c_a.py | 104
-rw-r--r--  Lib/fontTools/ttLib/tables/_l_t_a_g.py | 113
-rw-r--r--  Lib/fontTools/ttLib/tables/_m_a_x_p.py | 207
-rw-r--r--  Lib/fontTools/ttLib/tables/_m_e_t_a.py | 29
-rw-r--r--  Lib/fontTools/ttLib/tables/_n_a_m_e.py | 1913
-rw-r--r--  Lib/fontTools/ttLib/tables/_p_o_s_t.py | 490
-rw-r--r--  Lib/fontTools/ttLib/tables/_p_r_e_p.py | 3
-rw-r--r--  Lib/fontTools/ttLib/tables/_s_b_i_x.py | 164
-rw-r--r--  Lib/fontTools/ttLib/tables/_t_r_a_k.py | 515
-rw-r--r--  Lib/fontTools/ttLib/tables/_v_h_e_a.py | 185
-rw-r--r--  Lib/fontTools/ttLib/tables/_v_m_t_x.py | 10
-rw-r--r--  Lib/fontTools/ttLib/tables/asciiTable.py | 29
-rw-r--r--  Lib/fontTools/ttLib/tables/grUtils.py | 39
-rw-r--r--  Lib/fontTools/ttLib/tables/otBase.py | 2628
-rw-r--r--  Lib/fontTools/ttLib/tables/otConverters.py | 3339
-rw-r--r-- [-rwxr-xr-x]  Lib/fontTools/ttLib/tables/otData.py | 8189
-rw-r--r--  Lib/fontTools/ttLib/tables/otTables.py | 3964
-rw-r--r--  Lib/fontTools/ttLib/tables/otTraverse.py | 32
-rw-r--r--  Lib/fontTools/ttLib/tables/sbixGlyph.py | 218
-rw-r--r--  Lib/fontTools/ttLib/tables/sbixStrike.py | 277
-rw-r--r--  Lib/fontTools/ttLib/tables/ttProgram.py | 1011
-rw-r--r--  Lib/fontTools/ttLib/ttCollection.py | 226
-rw-r--r--  Lib/fontTools/ttLib/ttFont.py | 2002
-rw-r--r--  Lib/fontTools/ttLib/ttGlyphSet.py | 519
-rw-r--r--  Lib/fontTools/ttLib/woff2.py | 2947
99 files changed, 28906 insertions, 21529 deletions
diff --git a/Lib/fontTools/ttLib/__init__.py b/Lib/fontTools/ttLib/__init__.py
index dadd7f20..ed00764f 100644
--- a/Lib/fontTools/ttLib/__init__.py
+++ b/Lib/fontTools/ttLib/__init__.py
@@ -6,12 +6,21 @@ import logging
log = logging.getLogger(__name__)
-class TTLibError(Exception): pass
+
+class TTLibError(Exception):
+ pass
+
+
+class TTLibFileIsCollectionError(TTLibError):
+ pass
+
@deprecateFunction("use logging instead", category=DeprecationWarning)
def debugmsg(msg):
- import time
- print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
+ import time
+
+ print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
+
from fontTools.ttLib.ttFont import *
from fontTools.ttLib.ttCollection import TTCollection
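# A minimal usage sketch for the new TTLibFileIsCollectionError (not part of
# the diff): open a path as a single font and fall back to TTCollection when
# it turns out to be a collection -- the same pattern the new ttLib __main__
# below uses. "fonts.ttc" and the open_all_fonts helper are hypothetical.
from fontTools.ttLib import TTFont, TTCollection, TTLibFileIsCollectionError

def open_all_fonts(path):
    try:
        return [TTFont(path)]
    except TTLibFileIsCollectionError:
        return list(TTCollection(path).fonts)

fonts = open_all_fonts("fonts.ttc")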
diff --git a/Lib/fontTools/ttLib/__main__.py b/Lib/fontTools/ttLib/__main__.py
new file mode 100644
index 00000000..2733444d
--- /dev/null
+++ b/Lib/fontTools/ttLib/__main__.py
@@ -0,0 +1,108 @@
+import sys
+from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
+from fontTools.ttLib.ttFont import *
+from fontTools.ttLib.ttCollection import TTCollection
+
+
+def main(args=None):
+ """Open/save fonts with TTFont() or TTCollection()
+
+ ./fonttools ttLib [-oFILE] [-yNUMBER] files...
+
+ If multiple files are given on the command-line,
+ they are each opened (as a font or collection),
+ and added to the font list.
+
+ If -o (output-file) argument is given, the font
+ list is then saved to the output file, either as
+ a single font, if there is only one font, or as
+ a collection otherwise.
+
+ If -y (font-number) argument is given, only the
+ specified font from collections is opened.
+
+ The above allow extracting a single font from a
+ collection, or combining multiple fonts into a
+ collection.
+
+    If --lazy or --no-lazy are given, those are passed
+ to the TTFont() or TTCollection() constructors.
+ """
+ from fontTools import configLogger
+
+ if args is None:
+ args = sys.argv[1:]
+
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools ttLib",
+ description="Open/save fonts with TTFont() or TTCollection()",
+ epilog="""
+ If multiple files are given on the command-line,
+ they are each opened (as a font or collection),
+ and added to the font list.
+
+ The above, when combined with -o / --output,
+ allows for extracting a single font from a
+ collection, or combining multiple fonts into a
+ collection.
+ """,
+ )
+ parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
+ parser.add_argument(
+ "-t", "--table", metavar="table", nargs="*", help="Tables to decompile."
+ )
+ parser.add_argument(
+ "-o", "--output", metavar="FILE", default=None, help="Output file."
+ )
+ parser.add_argument(
+ "-y", metavar="NUMBER", default=-1, help="Font number to load from collections."
+ )
+ parser.add_argument(
+ "--lazy", action="store_true", default=None, help="Load fonts lazily."
+ )
+ parser.add_argument(
+ "--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately."
+ )
+ parser.add_argument(
+ "--flavor",
+ dest="flavor",
+ default=None,
+ help="Flavor of output font. 'woff' or 'woff2'.",
+ )
+ options = parser.parse_args(args)
+
+ fontNumber = int(options.y) if options.y is not None else None
+ outFile = options.output
+ lazy = options.lazy
+ flavor = options.flavor
+ tables = options.table if options.table is not None else []
+
+ fonts = []
+ for f in options.font:
+ try:
+ font = TTFont(f, fontNumber=fontNumber, lazy=lazy)
+ fonts.append(font)
+ except TTLibFileIsCollectionError:
+ collection = TTCollection(f, lazy=lazy)
+ fonts.extend(collection.fonts)
+
+ for font in fonts:
+ for table in tables if "*" not in tables else font.keys():
+ font[table] # Decompiles
+
+ if outFile is not None:
+ if len(fonts) == 1:
+ fonts[0].flavor = flavor
+ fonts[0].save(outFile)
+ else:
+ if flavor is not None:
+ raise TTLibError("Cannot set flavor for collections.")
+ collection = TTCollection()
+ collection.fonts = fonts
+ collection.save(outFile)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
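# A hedged usage sketch for the new `fonttools ttLib` entry point defined
# above (also reachable as `python -m fontTools.ttLib`); the font file names
# are hypothetical placeholders.
from fontTools.ttLib.__main__ import main

# Extract font number 0 from a collection into a standalone font:
main(["-y", "0", "-o", "Extracted.ttf", "Collection.ttc"])

# Combine two fonts into a new collection:
main(["-o", "Combined.ttc", "FontA.ttf", "FontB.ttf"])

# Re-save a single font as WOFF2 (flavor is rejected for collection output):
main(["--flavor", "woff2", "-o", "Font.woff2", "Font.ttf"])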
diff --git a/Lib/fontTools/ttLib/macUtils.py b/Lib/fontTools/ttLib/macUtils.py
index 496fb672..468a75ad 100644
--- a/Lib/fontTools/ttLib/macUtils.py
+++ b/Lib/fontTools/ttLib/macUtils.py
@@ -4,49 +4,51 @@ from fontTools.misc.macRes import ResourceReader, ResourceError
def getSFNTResIndices(path):
- """Determine whether a file has a 'sfnt' resource fork or not."""
- try:
- reader = ResourceReader(path)
- indices = reader.getIndices('sfnt')
- reader.close()
- return indices
- except ResourceError:
- return []
+ """Determine whether a file has a 'sfnt' resource fork or not."""
+ try:
+ reader = ResourceReader(path)
+ indices = reader.getIndices("sfnt")
+ reader.close()
+ return indices
+ except ResourceError:
+ return []
def openTTFonts(path):
- """Given a pathname, return a list of TTFont objects. In the case
- of a flat TTF/OTF file, the list will contain just one font object;
- but in the case of a Mac font suitcase it will contain as many
- font objects as there are sfnt resources in the file.
- """
- from fontTools import ttLib
- fonts = []
- sfnts = getSFNTResIndices(path)
- if not sfnts:
- fonts.append(ttLib.TTFont(path))
- else:
- for index in sfnts:
- fonts.append(ttLib.TTFont(path, index))
- if not fonts:
- raise ttLib.TTLibError("no fonts found in file '%s'" % path)
- return fonts
+ """Given a pathname, return a list of TTFont objects. In the case
+ of a flat TTF/OTF file, the list will contain just one font object;
+ but in the case of a Mac font suitcase it will contain as many
+ font objects as there are sfnt resources in the file.
+ """
+ from fontTools import ttLib
+
+ fonts = []
+ sfnts = getSFNTResIndices(path)
+ if not sfnts:
+ fonts.append(ttLib.TTFont(path))
+ else:
+ for index in sfnts:
+ fonts.append(ttLib.TTFont(path, index))
+ if not fonts:
+ raise ttLib.TTLibError("no fonts found in file '%s'" % path)
+ return fonts
class SFNTResourceReader(BytesIO):
- """Simple read-only file wrapper for 'sfnt' resources."""
-
- def __init__(self, path, res_name_or_index):
- from fontTools import ttLib
- reader = ResourceReader(path)
- if isinstance(res_name_or_index, str):
- rsrc = reader.getNamedResource('sfnt', res_name_or_index)
- else:
- rsrc = reader.getIndResource('sfnt', res_name_or_index)
- if rsrc is None:
- raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
- reader.close()
- self.rsrc = rsrc
- super(SFNTResourceReader, self).__init__(rsrc.data)
- self.name = path
+ """Simple read-only file wrapper for 'sfnt' resources."""
+
+ def __init__(self, path, res_name_or_index):
+ from fontTools import ttLib
+
+ reader = ResourceReader(path)
+ if isinstance(res_name_or_index, str):
+ rsrc = reader.getNamedResource("sfnt", res_name_or_index)
+ else:
+ rsrc = reader.getIndResource("sfnt", res_name_or_index)
+ if rsrc is None:
+ raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
+ reader.close()
+ self.rsrc = rsrc
+ super(SFNTResourceReader, self).__init__(rsrc.data)
+ self.name = path
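# A minimal sketch of the macUtils helpers reformatted above, assuming
# "Suitcase.dfont" is a hypothetical Mac font suitcase; on a flat TTF/OTF,
# getSFNTResIndices returns [] and openTTFonts returns a one-element list.
from fontTools.ttLib.macUtils import getSFNTResIndices, openTTFonts

path = "Suitcase.dfont"
print(getSFNTResIndices(path))      # indices of 'sfnt' resources, or []
for font in openTTFonts(path):      # one TTFont per 'sfnt' resource
    print(sorted(font.keys()))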
diff --git a/Lib/fontTools/ttLib/scaleUpem.py b/Lib/fontTools/ttLib/scaleUpem.py
index 9e0e0ade..3f9b22af 100644
--- a/Lib/fontTools/ttLib/scaleUpem.py
+++ b/Lib/fontTools/ttLib/scaleUpem.py
@@ -10,7 +10,9 @@ import fontTools.ttLib.tables.otBase as otBase
import fontTools.ttLib.tables.otTables as otTables
from fontTools.cffLib import VarStoreData
import fontTools.cffLib.specializer as cffSpecializer
+from fontTools.varLib import builder # for VarData.calculateNumShorts
from fontTools.misc.fixedTools import otRound
+from fontTools.ttLib.tables._g_l_y_f import VarComponentFlags
__all__ = ["scale_upem", "ScalerVisitor"]
@@ -111,30 +113,82 @@ def visit(visitor, obj, attr, VOriginRecords):
@ScalerVisitor.register_attr(ttLib.getTableClass("glyf"), "glyphs")
def visit(visitor, obj, attr, glyphs):
for g in glyphs.values():
+ for attr in ("xMin", "xMax", "yMin", "yMax"):
+ v = getattr(g, attr, None)
+ if v is not None:
+ setattr(g, attr, visitor.scale(v))
+
if g.isComposite():
for component in g.components:
component.x = visitor.scale(component.x)
component.y = visitor.scale(component.y)
- else:
- for attr in ("xMin", "xMax", "yMin", "yMax"):
- v = getattr(g, attr, None)
- if v is not None:
- setattr(g, attr, visitor.scale(v))
+ continue
- glyf = visitor.font["glyf"]
- coordinates = g.getCoordinates(glyf)[0]
- for i, (x, y) in enumerate(coordinates):
- coordinates[i] = visitor.scale(x), visitor.scale(y)
+ if g.isVarComposite():
+ for component in g.components:
+ for attr in ("translateX", "translateY", "tCenterX", "tCenterY"):
+ v = getattr(component.transform, attr)
+ setattr(component.transform, attr, visitor.scale(v))
+ continue
+
+ if hasattr(g, "coordinates"):
+ coordinates = g.coordinates
+ for i, (x, y) in enumerate(coordinates):
+ coordinates[i] = visitor.scale(x), visitor.scale(y)
@ScalerVisitor.register_attr(ttLib.getTableClass("gvar"), "variations")
def visit(visitor, obj, attr, variations):
- for varlist in variations.values():
+ # VarComposites are a pain to handle :-(
+ glyfTable = visitor.font["glyf"]
+
+ for glyphName, varlist in variations.items():
+ glyph = glyfTable[glyphName]
+ isVarComposite = glyph.isVarComposite()
for var in varlist:
coordinates = var.coordinates
- for i, xy in enumerate(coordinates):
- if xy is None:
- continue
+
+ if not isVarComposite:
+ for i, xy in enumerate(coordinates):
+ if xy is None:
+ continue
+ coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
+ continue
+
+ # VarComposite glyph
+
+ i = 0
+ for component in glyph.components:
+ if component.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ i += len(component.location)
+ if component.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X
+ | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ xy = coordinates[i]
+ coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
+ i += 1
+ if component.flags & VarComponentFlags.HAVE_ROTATION:
+ i += 1
+ if component.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ i += 1
+ if component.flags & (
+ VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y
+ ):
+ i += 1
+ if component.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ xy = coordinates[i]
+ coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
+ i += 1
+
+ # Phantom points
+ assert i + 4 == len(coordinates)
+ for i in range(i, len(coordinates)):
+ xy = coordinates[i]
coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
@@ -149,7 +203,8 @@ def visit(visitor, obj, attr, kernTables):
def _cff_scale(visitor, args):
for i, arg in enumerate(args):
if not isinstance(arg, list):
- args[i] = visitor.scale(arg)
+ if not isinstance(arg, bytes):
+ args[i] = visitor.scale(arg)
else:
num_blends = arg[-1]
_cff_scale(visitor, arg)
@@ -176,6 +231,8 @@ def visit(visitor, obj, attr, cff):
c.program, getNumRegions=getNumRegions
)
for op, args in commands:
+ if op == "vsindex":
+ continue
_cff_scale(visitor, args)
c.program[:] = cffSpecializer.commandsToProgram(commands)
@@ -231,6 +288,7 @@ def visit(visitor, varData):
for item in varData.Item:
for i, v in enumerate(item):
item[i] = visitor.scale(v)
+ varData.calculateNumShorts()
# COLRv1
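# A hedged sketch of driving the scaler changed above; the input path and the
# target units-per-em value (2048) are hypothetical examples.
from fontTools.ttLib import TTFont
from fontTools.ttLib.scaleUpem import scale_upem

font = TTFont("MyFont.ttf")
scale_upem(font, 2048)   # rescales outlines, metrics, variations, CFF, ...
font.save("MyFont-2048upm.ttf")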
diff --git a/Lib/fontTools/ttLib/sfnt.py b/Lib/fontTools/ttLib/sfnt.py
index e7c06337..b1569423 100644
--- a/Lib/fontTools/ttLib/sfnt.py
+++ b/Lib/fontTools/ttLib/sfnt.py
@@ -16,7 +16,7 @@ from io import BytesIO
from types import SimpleNamespace
from fontTools.misc.textTools import Tag
from fontTools.misc import sstruct
-from fontTools.ttLib import TTLibError
+from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
import struct
from collections import OrderedDict
import logging
@@ -26,127 +26,130 @@ log = logging.getLogger(__name__)
class SFNTReader(object):
-
- def __new__(cls, *args, **kwargs):
- """ Return an instance of the SFNTReader sub-class which is compatible
- with the input file type.
- """
- if args and cls is SFNTReader:
- infile = args[0]
- infile.seek(0)
- sfntVersion = Tag(infile.read(4))
- infile.seek(0)
- if sfntVersion == "wOF2":
- # return new WOFF2Reader object
- from fontTools.ttLib.woff2 import WOFF2Reader
- return object.__new__(WOFF2Reader)
- # return default object
- return object.__new__(cls)
-
- def __init__(self, file, checkChecksums=0, fontNumber=-1):
- self.file = file
- self.checkChecksums = checkChecksums
-
- self.flavor = None
- self.flavorData = None
- self.DirectoryEntry = SFNTDirectoryEntry
- self.file.seek(0)
- self.sfntVersion = self.file.read(4)
- self.file.seek(0)
- if self.sfntVersion == b"ttcf":
- header = readTTCHeader(self.file)
- numFonts = header.numFonts
- if not 0 <= fontNumber < numFonts:
- raise TTLibError("specify a font number between 0 and %d (inclusive)" % (numFonts - 1))
- self.numFonts = numFonts
- self.file.seek(header.offsetTable[fontNumber])
- data = self.file.read(sfntDirectorySize)
- if len(data) != sfntDirectorySize:
- raise TTLibError("Not a Font Collection (not enough data)")
- sstruct.unpack(sfntDirectoryFormat, data, self)
- elif self.sfntVersion == b"wOFF":
- self.flavor = "woff"
- self.DirectoryEntry = WOFFDirectoryEntry
- data = self.file.read(woffDirectorySize)
- if len(data) != woffDirectorySize:
- raise TTLibError("Not a WOFF font (not enough data)")
- sstruct.unpack(woffDirectoryFormat, data, self)
- else:
- data = self.file.read(sfntDirectorySize)
- if len(data) != sfntDirectorySize:
- raise TTLibError("Not a TrueType or OpenType font (not enough data)")
- sstruct.unpack(sfntDirectoryFormat, data, self)
- self.sfntVersion = Tag(self.sfntVersion)
-
- if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"):
- raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
- tables = {}
- for i in range(self.numTables):
- entry = self.DirectoryEntry()
- entry.fromFile(self.file)
- tag = Tag(entry.tag)
- tables[tag] = entry
- self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset))
-
- # Load flavor data if any
- if self.flavor == "woff":
- self.flavorData = WOFFFlavorData(self)
-
- def has_key(self, tag):
- return tag in self.tables
-
- __contains__ = has_key
-
- def keys(self):
- return self.tables.keys()
-
- def __getitem__(self, tag):
- """Fetch the raw table data."""
- entry = self.tables[Tag(tag)]
- data = entry.loadData (self.file)
- if self.checkChecksums:
- if tag == 'head':
- # Beh: we have to special-case the 'head' table.
- checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
- else:
- checksum = calcChecksum(data)
- if self.checkChecksums > 1:
- # Be obnoxious, and barf when it's wrong
- assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag
- elif checksum != entry.checkSum:
- # Be friendly, and just log a warning.
- log.warning("bad checksum for '%s' table", tag)
- return data
-
- def __delitem__(self, tag):
- del self.tables[Tag(tag)]
-
- def close(self):
- self.file.close()
-
- # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able
- # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a
- # reference to an external file object which is not pickleable. So in __getstate__
- # we store the file name and current position, and in __setstate__ we reopen the
- # same named file after unpickling.
-
- def __getstate__(self):
- if isinstance(self.file, BytesIO):
- # BytesIO is already pickleable, return the state unmodified
- return self.__dict__
-
- # remove unpickleable file attribute, and only store its name and pos
- state = self.__dict__.copy()
- del state["file"]
- state["_filename"] = self.file.name
- state["_filepos"] = self.file.tell()
- return state
-
- def __setstate__(self, state):
- if "file" not in state:
- self.file = open(state.pop("_filename"), "rb")
- self.file.seek(state.pop("_filepos"))
- self.__dict__.update(state)
+ def __new__(cls, *args, **kwargs):
+ """Return an instance of the SFNTReader sub-class which is compatible
+ with the input file type.
+ """
+ if args and cls is SFNTReader:
+ infile = args[0]
+ infile.seek(0)
+ sfntVersion = Tag(infile.read(4))
+ infile.seek(0)
+ if sfntVersion == "wOF2":
+ # return new WOFF2Reader object
+ from fontTools.ttLib.woff2 import WOFF2Reader
+
+ return object.__new__(WOFF2Reader)
+ # return default object
+ return object.__new__(cls)
+
+ def __init__(self, file, checkChecksums=0, fontNumber=-1):
+ self.file = file
+ self.checkChecksums = checkChecksums
+
+ self.flavor = None
+ self.flavorData = None
+ self.DirectoryEntry = SFNTDirectoryEntry
+ self.file.seek(0)
+ self.sfntVersion = self.file.read(4)
+ self.file.seek(0)
+ if self.sfntVersion == b"ttcf":
+ header = readTTCHeader(self.file)
+ numFonts = header.numFonts
+ if not 0 <= fontNumber < numFonts:
+ raise TTLibFileIsCollectionError(
+ "specify a font number between 0 and %d (inclusive)"
+ % (numFonts - 1)
+ )
+ self.numFonts = numFonts
+ self.file.seek(header.offsetTable[fontNumber])
+ data = self.file.read(sfntDirectorySize)
+ if len(data) != sfntDirectorySize:
+ raise TTLibError("Not a Font Collection (not enough data)")
+ sstruct.unpack(sfntDirectoryFormat, data, self)
+ elif self.sfntVersion == b"wOFF":
+ self.flavor = "woff"
+ self.DirectoryEntry = WOFFDirectoryEntry
+ data = self.file.read(woffDirectorySize)
+ if len(data) != woffDirectorySize:
+ raise TTLibError("Not a WOFF font (not enough data)")
+ sstruct.unpack(woffDirectoryFormat, data, self)
+ else:
+ data = self.file.read(sfntDirectorySize)
+ if len(data) != sfntDirectorySize:
+ raise TTLibError("Not a TrueType or OpenType font (not enough data)")
+ sstruct.unpack(sfntDirectoryFormat, data, self)
+ self.sfntVersion = Tag(self.sfntVersion)
+
+ if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"):
+ raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
+ tables = {}
+ for i in range(self.numTables):
+ entry = self.DirectoryEntry()
+ entry.fromFile(self.file)
+ tag = Tag(entry.tag)
+ tables[tag] = entry
+ self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset))
+
+ # Load flavor data if any
+ if self.flavor == "woff":
+ self.flavorData = WOFFFlavorData(self)
+
+ def has_key(self, tag):
+ return tag in self.tables
+
+ __contains__ = has_key
+
+ def keys(self):
+ return self.tables.keys()
+
+ def __getitem__(self, tag):
+ """Fetch the raw table data."""
+ entry = self.tables[Tag(tag)]
+ data = entry.loadData(self.file)
+ if self.checkChecksums:
+ if tag == "head":
+ # Beh: we have to special-case the 'head' table.
+ checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
+ else:
+ checksum = calcChecksum(data)
+ if self.checkChecksums > 1:
+ # Be obnoxious, and barf when it's wrong
+ assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag
+ elif checksum != entry.checkSum:
+ # Be friendly, and just log a warning.
+ log.warning("bad checksum for '%s' table", tag)
+ return data
+
+ def __delitem__(self, tag):
+ del self.tables[Tag(tag)]
+
+ def close(self):
+ self.file.close()
+
+ # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able
+ # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a
+ # reference to an external file object which is not pickleable. So in __getstate__
+ # we store the file name and current position, and in __setstate__ we reopen the
+ # same named file after unpickling.
+
+ def __getstate__(self):
+ if isinstance(self.file, BytesIO):
+ # BytesIO is already pickleable, return the state unmodified
+ return self.__dict__
+
+ # remove unpickleable file attribute, and only store its name and pos
+ state = self.__dict__.copy()
+ del state["file"]
+ state["_filename"] = self.file.name
+ state["_filepos"] = self.file.tell()
+ return state
+
+ def __setstate__(self, state):
+ if "file" not in state:
+ self.file = open(state.pop("_filename"), "rb")
+ self.file.seek(state.pop("_filepos"))
+ self.__dict__.update(state)
# default compression level for WOFF 1.0 tables and metadata
@@ -159,232 +162,257 @@ USE_ZOPFLI = False
# mapping between zlib's compression levels and zopfli's 'numiterations'.
# Use lower values for files over several MB in size or it will be too slow
ZOPFLI_LEVELS = {
- # 0: 0, # can't do 0 iterations...
- 1: 1,
- 2: 3,
- 3: 5,
- 4: 8,
- 5: 10,
- 6: 15,
- 7: 25,
- 8: 50,
- 9: 100,
+ # 0: 0, # can't do 0 iterations...
+ 1: 1,
+ 2: 3,
+ 3: 5,
+ 4: 8,
+ 5: 10,
+ 6: 15,
+ 7: 25,
+ 8: 50,
+ 9: 100,
}
def compress(data, level=ZLIB_COMPRESSION_LEVEL):
- """ Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True,
- zopfli is used instead of the zlib module.
- The compression 'level' must be between 0 and 9. 1 gives best speed,
- 9 gives best compression (0 gives no compression at all).
- The default value is a compromise between speed and compression (6).
- """
- if not (0 <= level <= 9):
- raise ValueError('Bad compression level: %s' % level)
- if not USE_ZOPFLI or level == 0:
- from zlib import compress
- return compress(data, level)
- else:
- from zopfli.zlib import compress
- return compress(data, numiterations=ZOPFLI_LEVELS[level])
+ """Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True,
+ zopfli is used instead of the zlib module.
+ The compression 'level' must be between 0 and 9. 1 gives best speed,
+ 9 gives best compression (0 gives no compression at all).
+ The default value is a compromise between speed and compression (6).
+ """
+ if not (0 <= level <= 9):
+ raise ValueError("Bad compression level: %s" % level)
+ if not USE_ZOPFLI or level == 0:
+ from zlib import compress
+ return compress(data, level)
+ else:
+ from zopfli.zlib import compress
-class SFNTWriter(object):
+ return compress(data, numiterations=ZOPFLI_LEVELS[level])
- def __new__(cls, *args, **kwargs):
- """ Return an instance of the SFNTWriter sub-class which is compatible
- with the specified 'flavor'.
- """
- flavor = None
- if kwargs and 'flavor' in kwargs:
- flavor = kwargs['flavor']
- elif args and len(args) > 3:
- flavor = args[3]
- if cls is SFNTWriter:
- if flavor == "woff2":
- # return new WOFF2Writer object
- from fontTools.ttLib.woff2 import WOFF2Writer
- return object.__new__(WOFF2Writer)
- # return default object
- return object.__new__(cls)
-
- def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
- flavor=None, flavorData=None):
- self.file = file
- self.numTables = numTables
- self.sfntVersion = Tag(sfntVersion)
- self.flavor = flavor
- self.flavorData = flavorData
-
- if self.flavor == "woff":
- self.directoryFormat = woffDirectoryFormat
- self.directorySize = woffDirectorySize
- self.DirectoryEntry = WOFFDirectoryEntry
-
- self.signature = "wOFF"
-
- # to calculate WOFF checksum adjustment, we also need the original SFNT offsets
- self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize
- else:
- assert not self.flavor, "Unknown flavor '%s'" % self.flavor
- self.directoryFormat = sfntDirectoryFormat
- self.directorySize = sfntDirectorySize
- self.DirectoryEntry = SFNTDirectoryEntry
-
- from fontTools.ttLib import getSearchRange
- self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16)
-
- self.directoryOffset = self.file.tell()
- self.nextTableOffset = self.directoryOffset + self.directorySize + numTables * self.DirectoryEntry.formatSize
- # clear out directory area
- self.file.seek(self.nextTableOffset)
- # make sure we're actually where we want to be. (old cStringIO bug)
- self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
- self.tables = OrderedDict()
-
- def setEntry(self, tag, entry):
- if tag in self.tables:
- raise TTLibError("cannot rewrite '%s' table" % tag)
-
- self.tables[tag] = entry
-
- def __setitem__(self, tag, data):
- """Write raw table data to disk."""
- if tag in self.tables:
- raise TTLibError("cannot rewrite '%s' table" % tag)
-
- entry = self.DirectoryEntry()
- entry.tag = tag
- entry.offset = self.nextTableOffset
- if tag == 'head':
- entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
- self.headTable = data
- entry.uncompressed = True
- else:
- entry.checkSum = calcChecksum(data)
- entry.saveData(self.file, data)
-
- if self.flavor == "woff":
- entry.origOffset = self.origNextTableOffset
- self.origNextTableOffset += (entry.origLength + 3) & ~3
-
- self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3)
- # Add NUL bytes to pad the table data to a 4-byte boundary.
- # Don't depend on f.seek() as we need to add the padding even if no
- # subsequent write follows (seek is lazy), ie. after the final table
- # in the font.
- self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
- assert self.nextTableOffset == self.file.tell()
-
- self.setEntry(tag, entry)
-
- def __getitem__(self, tag):
- return self.tables[tag]
-
- def close(self):
- """All tables must have been written to disk. Now write the
- directory.
- """
- tables = sorted(self.tables.items())
- if len(tables) != self.numTables:
- raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables)))
-
- if self.flavor == "woff":
- self.signature = b"wOFF"
- self.reserved = 0
-
- self.totalSfntSize = 12
- self.totalSfntSize += 16 * len(tables)
- for tag, entry in tables:
- self.totalSfntSize += (entry.origLength + 3) & ~3
-
- data = self.flavorData if self.flavorData else WOFFFlavorData()
- if data.majorVersion is not None and data.minorVersion is not None:
- self.majorVersion = data.majorVersion
- self.minorVersion = data.minorVersion
- else:
- if hasattr(self, 'headTable'):
- self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8])
- else:
- self.majorVersion = self.minorVersion = 0
- if data.metaData:
- self.metaOrigLength = len(data.metaData)
- self.file.seek(0,2)
- self.metaOffset = self.file.tell()
- compressedMetaData = compress(data.metaData)
- self.metaLength = len(compressedMetaData)
- self.file.write(compressedMetaData)
- else:
- self.metaOffset = self.metaLength = self.metaOrigLength = 0
- if data.privData:
- self.file.seek(0,2)
- off = self.file.tell()
- paddedOff = (off + 3) & ~3
- self.file.write('\0' * (paddedOff - off))
- self.privOffset = self.file.tell()
- self.privLength = len(data.privData)
- self.file.write(data.privData)
- else:
- self.privOffset = self.privLength = 0
-
- self.file.seek(0,2)
- self.length = self.file.tell()
-
- else:
- assert not self.flavor, "Unknown flavor '%s'" % self.flavor
- pass
-
- directory = sstruct.pack(self.directoryFormat, self)
-
- self.file.seek(self.directoryOffset + self.directorySize)
- seenHead = 0
- for tag, entry in tables:
- if tag == "head":
- seenHead = 1
- directory = directory + entry.toString()
- if seenHead:
- self.writeMasterChecksum(directory)
- self.file.seek(self.directoryOffset)
- self.file.write(directory)
-
- def _calcMasterChecksum(self, directory):
- # calculate checkSumAdjustment
- tags = list(self.tables.keys())
- checksums = []
- for i in range(len(tags)):
- checksums.append(self.tables[tags[i]].checkSum)
-
- if self.DirectoryEntry != SFNTDirectoryEntry:
- # Create a SFNT directory for checksum calculation purposes
- from fontTools.ttLib import getSearchRange
- self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16)
- directory = sstruct.pack(sfntDirectoryFormat, self)
- tables = sorted(self.tables.items())
- for tag, entry in tables:
- sfntEntry = SFNTDirectoryEntry()
- sfntEntry.tag = entry.tag
- sfntEntry.checkSum = entry.checkSum
- sfntEntry.offset = entry.origOffset
- sfntEntry.length = entry.origLength
- directory = directory + sfntEntry.toString()
-
- directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
- assert directory_end == len(directory)
-
- checksums.append(calcChecksum(directory))
- checksum = sum(checksums) & 0xffffffff
- # BiboAfba!
- checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
- return checksumadjustment
-
- def writeMasterChecksum(self, directory):
- checksumadjustment = self._calcMasterChecksum(directory)
- # write the checksum to the file
- self.file.seek(self.tables['head'].offset + 8)
- self.file.write(struct.pack(">L", checksumadjustment))
-
- def reordersTables(self):
- return False
+
+class SFNTWriter(object):
+ def __new__(cls, *args, **kwargs):
+ """Return an instance of the SFNTWriter sub-class which is compatible
+ with the specified 'flavor'.
+ """
+ flavor = None
+ if kwargs and "flavor" in kwargs:
+ flavor = kwargs["flavor"]
+ elif args and len(args) > 3:
+ flavor = args[3]
+ if cls is SFNTWriter:
+ if flavor == "woff2":
+ # return new WOFF2Writer object
+ from fontTools.ttLib.woff2 import WOFF2Writer
+
+ return object.__new__(WOFF2Writer)
+ # return default object
+ return object.__new__(cls)
+
+ def __init__(
+ self,
+ file,
+ numTables,
+ sfntVersion="\000\001\000\000",
+ flavor=None,
+ flavorData=None,
+ ):
+ self.file = file
+ self.numTables = numTables
+ self.sfntVersion = Tag(sfntVersion)
+ self.flavor = flavor
+ self.flavorData = flavorData
+
+ if self.flavor == "woff":
+ self.directoryFormat = woffDirectoryFormat
+ self.directorySize = woffDirectorySize
+ self.DirectoryEntry = WOFFDirectoryEntry
+
+ self.signature = "wOFF"
+
+ # to calculate WOFF checksum adjustment, we also need the original SFNT offsets
+ self.origNextTableOffset = (
+ sfntDirectorySize + numTables * sfntDirectoryEntrySize
+ )
+ else:
+ assert not self.flavor, "Unknown flavor '%s'" % self.flavor
+ self.directoryFormat = sfntDirectoryFormat
+ self.directorySize = sfntDirectorySize
+ self.DirectoryEntry = SFNTDirectoryEntry
+
+ from fontTools.ttLib import getSearchRange
+
+ self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
+ numTables, 16
+ )
+
+ self.directoryOffset = self.file.tell()
+ self.nextTableOffset = (
+ self.directoryOffset
+ + self.directorySize
+ + numTables * self.DirectoryEntry.formatSize
+ )
+ # clear out directory area
+ self.file.seek(self.nextTableOffset)
+ # make sure we're actually where we want to be. (old cStringIO bug)
+ self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
+ self.tables = OrderedDict()
+
+ def setEntry(self, tag, entry):
+ if tag in self.tables:
+ raise TTLibError("cannot rewrite '%s' table" % tag)
+
+ self.tables[tag] = entry
+
+ def __setitem__(self, tag, data):
+ """Write raw table data to disk."""
+ if tag in self.tables:
+ raise TTLibError("cannot rewrite '%s' table" % tag)
+
+ entry = self.DirectoryEntry()
+ entry.tag = tag
+ entry.offset = self.nextTableOffset
+ if tag == "head":
+ entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
+ self.headTable = data
+ entry.uncompressed = True
+ else:
+ entry.checkSum = calcChecksum(data)
+ entry.saveData(self.file, data)
+
+ if self.flavor == "woff":
+ entry.origOffset = self.origNextTableOffset
+ self.origNextTableOffset += (entry.origLength + 3) & ~3
+
+ self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3)
+ # Add NUL bytes to pad the table data to a 4-byte boundary.
+ # Don't depend on f.seek() as we need to add the padding even if no
+ # subsequent write follows (seek is lazy), ie. after the final table
+ # in the font.
+ self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
+ assert self.nextTableOffset == self.file.tell()
+
+ self.setEntry(tag, entry)
+
+ def __getitem__(self, tag):
+ return self.tables[tag]
+
+ def close(self):
+ """All tables must have been written to disk. Now write the
+ directory.
+ """
+ tables = sorted(self.tables.items())
+ if len(tables) != self.numTables:
+ raise TTLibError(
+ "wrong number of tables; expected %d, found %d"
+ % (self.numTables, len(tables))
+ )
+
+ if self.flavor == "woff":
+ self.signature = b"wOFF"
+ self.reserved = 0
+
+ self.totalSfntSize = 12
+ self.totalSfntSize += 16 * len(tables)
+ for tag, entry in tables:
+ self.totalSfntSize += (entry.origLength + 3) & ~3
+
+ data = self.flavorData if self.flavorData else WOFFFlavorData()
+ if data.majorVersion is not None and data.minorVersion is not None:
+ self.majorVersion = data.majorVersion
+ self.minorVersion = data.minorVersion
+ else:
+ if hasattr(self, "headTable"):
+ self.majorVersion, self.minorVersion = struct.unpack(
+ ">HH", self.headTable[4:8]
+ )
+ else:
+ self.majorVersion = self.minorVersion = 0
+ if data.metaData:
+ self.metaOrigLength = len(data.metaData)
+ self.file.seek(0, 2)
+ self.metaOffset = self.file.tell()
+ compressedMetaData = compress(data.metaData)
+ self.metaLength = len(compressedMetaData)
+ self.file.write(compressedMetaData)
+ else:
+ self.metaOffset = self.metaLength = self.metaOrigLength = 0
+ if data.privData:
+ self.file.seek(0, 2)
+ off = self.file.tell()
+ paddedOff = (off + 3) & ~3
+ self.file.write(b"\0" * (paddedOff - off))
+ self.privOffset = self.file.tell()
+ self.privLength = len(data.privData)
+ self.file.write(data.privData)
+ else:
+ self.privOffset = self.privLength = 0
+
+ self.file.seek(0, 2)
+ self.length = self.file.tell()
+
+ else:
+ assert not self.flavor, "Unknown flavor '%s'" % self.flavor
+ pass
+
+ directory = sstruct.pack(self.directoryFormat, self)
+
+ self.file.seek(self.directoryOffset + self.directorySize)
+ seenHead = 0
+ for tag, entry in tables:
+ if tag == "head":
+ seenHead = 1
+ directory = directory + entry.toString()
+ if seenHead:
+ self.writeMasterChecksum(directory)
+ self.file.seek(self.directoryOffset)
+ self.file.write(directory)
+
+ def _calcMasterChecksum(self, directory):
+ # calculate checkSumAdjustment
+ tags = list(self.tables.keys())
+ checksums = []
+ for i in range(len(tags)):
+ checksums.append(self.tables[tags[i]].checkSum)
+
+ if self.DirectoryEntry != SFNTDirectoryEntry:
+ # Create a SFNT directory for checksum calculation purposes
+ from fontTools.ttLib import getSearchRange
+
+ self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
+ self.numTables, 16
+ )
+ directory = sstruct.pack(sfntDirectoryFormat, self)
+ tables = sorted(self.tables.items())
+ for tag, entry in tables:
+ sfntEntry = SFNTDirectoryEntry()
+ sfntEntry.tag = entry.tag
+ sfntEntry.checkSum = entry.checkSum
+ sfntEntry.offset = entry.origOffset
+ sfntEntry.length = entry.origLength
+ directory = directory + sfntEntry.toString()
+
+ directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
+ assert directory_end == len(directory)
+
+ checksums.append(calcChecksum(directory))
+ checksum = sum(checksums) & 0xFFFFFFFF
+ # BiboAfba!
+ checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
+ return checksumadjustment
+
+ def writeMasterChecksum(self, directory):
+ checksumadjustment = self._calcMasterChecksum(directory)
+ # write the checksum to the file
+ self.file.seek(self.tables["head"].offset + 8)
+ self.file.write(struct.pack(">L", checksumadjustment))
+
+ def reordersTables(self):
+ return False
# -- sfnt directory helpers and cruft
@@ -455,170 +483,179 @@ woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
class DirectoryEntry(object):
+ def __init__(self):
+ self.uncompressed = False # if True, always embed entry raw
- def __init__(self):
- self.uncompressed = False # if True, always embed entry raw
+ def fromFile(self, file):
+ sstruct.unpack(self.format, file.read(self.formatSize), self)
- def fromFile(self, file):
- sstruct.unpack(self.format, file.read(self.formatSize), self)
+ def fromString(self, str):
+ sstruct.unpack(self.format, str, self)
- def fromString(self, str):
- sstruct.unpack(self.format, str, self)
+ def toString(self):
+ return sstruct.pack(self.format, self)
- def toString(self):
- return sstruct.pack(self.format, self)
+ def __repr__(self):
+ if hasattr(self, "tag"):
+ return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self))
+ else:
+ return "<%s at %x>" % (self.__class__.__name__, id(self))
- def __repr__(self):
- if hasattr(self, "tag"):
- return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self))
- else:
- return "<%s at %x>" % (self.__class__.__name__, id(self))
+ def loadData(self, file):
+ file.seek(self.offset)
+ data = file.read(self.length)
+ assert len(data) == self.length
+ if hasattr(self.__class__, "decodeData"):
+ data = self.decodeData(data)
+ return data
- def loadData(self, file):
- file.seek(self.offset)
- data = file.read(self.length)
- assert len(data) == self.length
- if hasattr(self.__class__, 'decodeData'):
- data = self.decodeData(data)
- return data
+ def saveData(self, file, data):
+ if hasattr(self.__class__, "encodeData"):
+ data = self.encodeData(data)
+ self.length = len(data)
+ file.seek(self.offset)
+ file.write(data)
- def saveData(self, file, data):
- if hasattr(self.__class__, 'encodeData'):
- data = self.encodeData(data)
- self.length = len(data)
- file.seek(self.offset)
- file.write(data)
+ def decodeData(self, rawData):
+ return rawData
- def decodeData(self, rawData):
- return rawData
+ def encodeData(self, data):
+ return data
- def encodeData(self, data):
- return data
class SFNTDirectoryEntry(DirectoryEntry):
+ format = sfntDirectoryEntryFormat
+ formatSize = sfntDirectoryEntrySize
- format = sfntDirectoryEntryFormat
- formatSize = sfntDirectoryEntrySize
class WOFFDirectoryEntry(DirectoryEntry):
-
- format = woffDirectoryEntryFormat
- formatSize = woffDirectoryEntrySize
-
- def __init__(self):
- super(WOFFDirectoryEntry, self).__init__()
- # With fonttools<=3.1.2, the only way to set a different zlib
- # compression level for WOFF directory entries was to set the class
- # attribute 'zlibCompressionLevel'. This is now replaced by a globally
- # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
- # compressing the metadata. For backward compatibility, we still
- # use the class attribute if it was already set.
- if not hasattr(WOFFDirectoryEntry, 'zlibCompressionLevel'):
- self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL
-
- def decodeData(self, rawData):
- import zlib
- if self.length == self.origLength:
- data = rawData
- else:
- assert self.length < self.origLength
- data = zlib.decompress(rawData)
- assert len(data) == self.origLength
- return data
-
- def encodeData(self, data):
- self.origLength = len(data)
- if not self.uncompressed:
- compressedData = compress(data, self.zlibCompressionLevel)
- if self.uncompressed or len(compressedData) >= self.origLength:
- # Encode uncompressed
- rawData = data
- self.length = self.origLength
- else:
- rawData = compressedData
- self.length = len(rawData)
- return rawData
-
-class WOFFFlavorData():
-
- Flavor = 'woff'
-
- def __init__(self, reader=None):
- self.majorVersion = None
- self.minorVersion = None
- self.metaData = None
- self.privData = None
- if reader:
- self.majorVersion = reader.majorVersion
- self.minorVersion = reader.minorVersion
- if reader.metaLength:
- reader.file.seek(reader.metaOffset)
- rawData = reader.file.read(reader.metaLength)
- assert len(rawData) == reader.metaLength
- data = self._decompress(rawData)
- assert len(data) == reader.metaOrigLength
- self.metaData = data
- if reader.privLength:
- reader.file.seek(reader.privOffset)
- data = reader.file.read(reader.privLength)
- assert len(data) == reader.privLength
- self.privData = data
-
- def _decompress(self, rawData):
- import zlib
- return zlib.decompress(rawData)
+ format = woffDirectoryEntryFormat
+ formatSize = woffDirectoryEntrySize
+
+ def __init__(self):
+ super(WOFFDirectoryEntry, self).__init__()
+ # With fonttools<=3.1.2, the only way to set a different zlib
+ # compression level for WOFF directory entries was to set the class
+ # attribute 'zlibCompressionLevel'. This is now replaced by a globally
+ # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
+ # compressing the metadata. For backward compatibility, we still
+ # use the class attribute if it was already set.
+ if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"):
+ self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL
+
+ def decodeData(self, rawData):
+ import zlib
+
+ if self.length == self.origLength:
+ data = rawData
+ else:
+ assert self.length < self.origLength
+ data = zlib.decompress(rawData)
+ assert len(data) == self.origLength
+ return data
+
+ def encodeData(self, data):
+ self.origLength = len(data)
+ if not self.uncompressed:
+ compressedData = compress(data, self.zlibCompressionLevel)
+ if self.uncompressed or len(compressedData) >= self.origLength:
+ # Encode uncompressed
+ rawData = data
+ self.length = self.origLength
+ else:
+ rawData = compressedData
+ self.length = len(rawData)
+ return rawData
+
+
+class WOFFFlavorData:
+ Flavor = "woff"
+
+ def __init__(self, reader=None):
+ self.majorVersion = None
+ self.minorVersion = None
+ self.metaData = None
+ self.privData = None
+ if reader:
+ self.majorVersion = reader.majorVersion
+ self.minorVersion = reader.minorVersion
+ if reader.metaLength:
+ reader.file.seek(reader.metaOffset)
+ rawData = reader.file.read(reader.metaLength)
+ assert len(rawData) == reader.metaLength
+ data = self._decompress(rawData)
+ assert len(data) == reader.metaOrigLength
+ self.metaData = data
+ if reader.privLength:
+ reader.file.seek(reader.privOffset)
+ data = reader.file.read(reader.privLength)
+ assert len(data) == reader.privLength
+ self.privData = data
+
+ def _decompress(self, rawData):
+ import zlib
+
+ return zlib.decompress(rawData)
def calcChecksum(data):
- """Calculate the checksum for an arbitrary block of data.
-
- If the data length is not a multiple of four, it assumes
- it is to be padded with null byte.
-
- >>> print(calcChecksum(b"abcd"))
- 1633837924
- >>> print(calcChecksum(b"abcdxyz"))
- 3655064932
- """
- remainder = len(data) % 4
- if remainder:
- data += b"\0" * (4 - remainder)
- value = 0
- blockSize = 4096
- assert blockSize % 4 == 0
- for i in range(0, len(data), blockSize):
- block = data[i:i+blockSize]
- longs = struct.unpack(">%dL" % (len(block) // 4), block)
- value = (value + sum(longs)) & 0xffffffff
- return value
+ """Calculate the checksum for an arbitrary block of data.
+
+ If the data length is not a multiple of four, it assumes
+ it is to be padded with null byte.
+
+ >>> print(calcChecksum(b"abcd"))
+ 1633837924
+ >>> print(calcChecksum(b"abcdxyz"))
+ 3655064932
+ """
+ remainder = len(data) % 4
+ if remainder:
+ data += b"\0" * (4 - remainder)
+ value = 0
+ blockSize = 4096
+ assert blockSize % 4 == 0
+ for i in range(0, len(data), blockSize):
+ block = data[i : i + blockSize]
+ longs = struct.unpack(">%dL" % (len(block) // 4), block)
+ value = (value + sum(longs)) & 0xFFFFFFFF
+ return value
+
def readTTCHeader(file):
- file.seek(0)
- data = file.read(ttcHeaderSize)
- if len(data) != ttcHeaderSize:
- raise TTLibError("Not a Font Collection (not enough data)")
- self = SimpleNamespace()
- sstruct.unpack(ttcHeaderFormat, data, self)
- if self.TTCTag != "ttcf":
- raise TTLibError("Not a Font Collection")
- assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version
- self.offsetTable = struct.unpack(">%dL" % self.numFonts, file.read(self.numFonts * 4))
- if self.Version == 0x00020000:
- pass # ignoring version 2.0 signatures
- return self
+ file.seek(0)
+ data = file.read(ttcHeaderSize)
+ if len(data) != ttcHeaderSize:
+ raise TTLibError("Not a Font Collection (not enough data)")
+ self = SimpleNamespace()
+ sstruct.unpack(ttcHeaderFormat, data, self)
+ if self.TTCTag != "ttcf":
+ raise TTLibError("Not a Font Collection")
+ assert self.Version == 0x00010000 or self.Version == 0x00020000, (
+ "unrecognized TTC version 0x%08x" % self.Version
+ )
+ self.offsetTable = struct.unpack(
+ ">%dL" % self.numFonts, file.read(self.numFonts * 4)
+ )
+ if self.Version == 0x00020000:
+ pass # ignoring version 2.0 signatures
+ return self
+
def writeTTCHeader(file, numFonts):
- self = SimpleNamespace()
- self.TTCTag = 'ttcf'
- self.Version = 0x00010000
- self.numFonts = numFonts
- file.seek(0)
- file.write(sstruct.pack(ttcHeaderFormat, self))
- offset = file.tell()
- file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
- return offset
+ self = SimpleNamespace()
+ self.TTCTag = "ttcf"
+ self.Version = 0x00010000
+ self.numFonts = numFonts
+ file.seek(0)
+ file.write(sstruct.pack(ttcHeaderFormat, self))
+ offset = file.tell()
+ file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
+ return offset
+
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
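# A minimal sketch of the low-level SFNTReader interface reformatted above;
# "MyFont.ttf" is a hypothetical path.
from fontTools.ttLib.sfnt import SFNTReader

with open("MyFont.ttf", "rb") as f:
    reader = SFNTReader(f, checkChecksums=1)  # 1 = log a warning on bad checksums
    print(list(reader.keys()))                # table tags, ordered by offset
    head_data = reader["head"]                # raw, undecompiled table bytes
    print(len(head_data))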
diff --git a/Lib/fontTools/ttLib/standardGlyphOrder.py b/Lib/fontTools/ttLib/standardGlyphOrder.py
index 1f980e45..40623852 100644
--- a/Lib/fontTools/ttLib/standardGlyphOrder.py
+++ b/Lib/fontTools/ttLib/standardGlyphOrder.py
@@ -2,270 +2,270 @@
# 'post' table formats 1.0 and 2.0 rely on this list of "standard"
# glyphs.
#
-# My list is correct according to the Apple documentation for the 'post'
-# table: http://developer.apple.com/fonts/TTRefMan/RM06/Chap6post.html
+# My list is correct according to the Apple documentation for the 'post' table:
+# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html
# (However, it seems that TTFdump (from MS) and FontLab disagree, at
# least with respect to the last glyph, which they list as 'dslash'
# instead of 'dcroat'.)
#
standardGlyphOrder = [
- ".notdef", # 0
- ".null", # 1
- "nonmarkingreturn", # 2
- "space", # 3
- "exclam", # 4
- "quotedbl", # 5
- "numbersign", # 6
- "dollar", # 7
- "percent", # 8
- "ampersand", # 9
- "quotesingle", # 10
- "parenleft", # 11
- "parenright", # 12
- "asterisk", # 13
- "plus", # 14
- "comma", # 15
- "hyphen", # 16
- "period", # 17
- "slash", # 18
- "zero", # 19
- "one", # 20
- "two", # 21
- "three", # 22
- "four", # 23
- "five", # 24
- "six", # 25
- "seven", # 26
- "eight", # 27
- "nine", # 28
- "colon", # 29
- "semicolon", # 30
- "less", # 31
- "equal", # 32
- "greater", # 33
- "question", # 34
- "at", # 35
- "A", # 36
- "B", # 37
- "C", # 38
- "D", # 39
- "E", # 40
- "F", # 41
- "G", # 42
- "H", # 43
- "I", # 44
- "J", # 45
- "K", # 46
- "L", # 47
- "M", # 48
- "N", # 49
- "O", # 50
- "P", # 51
- "Q", # 52
- "R", # 53
- "S", # 54
- "T", # 55
- "U", # 56
- "V", # 57
- "W", # 58
- "X", # 59
- "Y", # 60
- "Z", # 61
- "bracketleft", # 62
- "backslash", # 63
- "bracketright", # 64
- "asciicircum", # 65
- "underscore", # 66
- "grave", # 67
- "a", # 68
- "b", # 69
- "c", # 70
- "d", # 71
- "e", # 72
- "f", # 73
- "g", # 74
- "h", # 75
- "i", # 76
- "j", # 77
- "k", # 78
- "l", # 79
- "m", # 80
- "n", # 81
- "o", # 82
- "p", # 83
- "q", # 84
- "r", # 85
- "s", # 86
- "t", # 87
- "u", # 88
- "v", # 89
- "w", # 90
- "x", # 91
- "y", # 92
- "z", # 93
- "braceleft", # 94
- "bar", # 95
- "braceright", # 96
- "asciitilde", # 97
- "Adieresis", # 98
- "Aring", # 99
- "Ccedilla", # 100
- "Eacute", # 101
- "Ntilde", # 102
- "Odieresis", # 103
- "Udieresis", # 104
- "aacute", # 105
- "agrave", # 106
- "acircumflex", # 107
- "adieresis", # 108
- "atilde", # 109
- "aring", # 110
- "ccedilla", # 111
- "eacute", # 112
- "egrave", # 113
- "ecircumflex", # 114
- "edieresis", # 115
- "iacute", # 116
- "igrave", # 117
- "icircumflex", # 118
- "idieresis", # 119
- "ntilde", # 120
- "oacute", # 121
- "ograve", # 122
- "ocircumflex", # 123
- "odieresis", # 124
- "otilde", # 125
- "uacute", # 126
- "ugrave", # 127
- "ucircumflex", # 128
- "udieresis", # 129
- "dagger", # 130
- "degree", # 131
- "cent", # 132
- "sterling", # 133
- "section", # 134
- "bullet", # 135
- "paragraph", # 136
- "germandbls", # 137
- "registered", # 138
- "copyright", # 139
- "trademark", # 140
- "acute", # 141
- "dieresis", # 142
- "notequal", # 143
- "AE", # 144
- "Oslash", # 145
- "infinity", # 146
- "plusminus", # 147
- "lessequal", # 148
- "greaterequal", # 149
- "yen", # 150
- "mu", # 151
- "partialdiff", # 152
- "summation", # 153
- "product", # 154
- "pi", # 155
- "integral", # 156
- "ordfeminine", # 157
- "ordmasculine", # 158
- "Omega", # 159
- "ae", # 160
- "oslash", # 161
- "questiondown", # 162
- "exclamdown", # 163
- "logicalnot", # 164
- "radical", # 165
- "florin", # 166
- "approxequal", # 167
- "Delta", # 168
- "guillemotleft", # 169
- "guillemotright", # 170
- "ellipsis", # 171
- "nonbreakingspace", # 172
- "Agrave", # 173
- "Atilde", # 174
- "Otilde", # 175
- "OE", # 176
- "oe", # 177
- "endash", # 178
- "emdash", # 179
- "quotedblleft", # 180
- "quotedblright", # 181
- "quoteleft", # 182
- "quoteright", # 183
- "divide", # 184
- "lozenge", # 185
- "ydieresis", # 186
- "Ydieresis", # 187
- "fraction", # 188
- "currency", # 189
- "guilsinglleft", # 190
- "guilsinglright", # 191
- "fi", # 192
- "fl", # 193
- "daggerdbl", # 194
- "periodcentered", # 195
- "quotesinglbase", # 196
- "quotedblbase", # 197
- "perthousand", # 198
- "Acircumflex", # 199
- "Ecircumflex", # 200
- "Aacute", # 201
- "Edieresis", # 202
- "Egrave", # 203
- "Iacute", # 204
- "Icircumflex", # 205
- "Idieresis", # 206
- "Igrave", # 207
- "Oacute", # 208
- "Ocircumflex", # 209
- "apple", # 210
- "Ograve", # 211
- "Uacute", # 212
- "Ucircumflex", # 213
- "Ugrave", # 214
- "dotlessi", # 215
- "circumflex", # 216
- "tilde", # 217
- "macron", # 218
- "breve", # 219
- "dotaccent", # 220
- "ring", # 221
- "cedilla", # 222
- "hungarumlaut", # 223
- "ogonek", # 224
- "caron", # 225
- "Lslash", # 226
- "lslash", # 227
- "Scaron", # 228
- "scaron", # 229
- "Zcaron", # 230
- "zcaron", # 231
- "brokenbar", # 232
- "Eth", # 233
- "eth", # 234
- "Yacute", # 235
- "yacute", # 236
- "Thorn", # 237
- "thorn", # 238
- "minus", # 239
- "multiply", # 240
- "onesuperior", # 241
- "twosuperior", # 242
- "threesuperior", # 243
- "onehalf", # 244
- "onequarter", # 245
- "threequarters", # 246
- "franc", # 247
- "Gbreve", # 248
- "gbreve", # 249
- "Idotaccent", # 250
- "Scedilla", # 251
- "scedilla", # 252
- "Cacute", # 253
- "cacute", # 254
- "Ccaron", # 255
- "ccaron", # 256
- "dcroat" # 257
+ ".notdef", # 0
+ ".null", # 1
+ "nonmarkingreturn", # 2
+ "space", # 3
+ "exclam", # 4
+ "quotedbl", # 5
+ "numbersign", # 6
+ "dollar", # 7
+ "percent", # 8
+ "ampersand", # 9
+ "quotesingle", # 10
+ "parenleft", # 11
+ "parenright", # 12
+ "asterisk", # 13
+ "plus", # 14
+ "comma", # 15
+ "hyphen", # 16
+ "period", # 17
+ "slash", # 18
+ "zero", # 19
+ "one", # 20
+ "two", # 21
+ "three", # 22
+ "four", # 23
+ "five", # 24
+ "six", # 25
+ "seven", # 26
+ "eight", # 27
+ "nine", # 28
+ "colon", # 29
+ "semicolon", # 30
+ "less", # 31
+ "equal", # 32
+ "greater", # 33
+ "question", # 34
+ "at", # 35
+ "A", # 36
+ "B", # 37
+ "C", # 38
+ "D", # 39
+ "E", # 40
+ "F", # 41
+ "G", # 42
+ "H", # 43
+ "I", # 44
+ "J", # 45
+ "K", # 46
+ "L", # 47
+ "M", # 48
+ "N", # 49
+ "O", # 50
+ "P", # 51
+ "Q", # 52
+ "R", # 53
+ "S", # 54
+ "T", # 55
+ "U", # 56
+ "V", # 57
+ "W", # 58
+ "X", # 59
+ "Y", # 60
+ "Z", # 61
+ "bracketleft", # 62
+ "backslash", # 63
+ "bracketright", # 64
+ "asciicircum", # 65
+ "underscore", # 66
+ "grave", # 67
+ "a", # 68
+ "b", # 69
+ "c", # 70
+ "d", # 71
+ "e", # 72
+ "f", # 73
+ "g", # 74
+ "h", # 75
+ "i", # 76
+ "j", # 77
+ "k", # 78
+ "l", # 79
+ "m", # 80
+ "n", # 81
+ "o", # 82
+ "p", # 83
+ "q", # 84
+ "r", # 85
+ "s", # 86
+ "t", # 87
+ "u", # 88
+ "v", # 89
+ "w", # 90
+ "x", # 91
+ "y", # 92
+ "z", # 93
+ "braceleft", # 94
+ "bar", # 95
+ "braceright", # 96
+ "asciitilde", # 97
+ "Adieresis", # 98
+ "Aring", # 99
+ "Ccedilla", # 100
+ "Eacute", # 101
+ "Ntilde", # 102
+ "Odieresis", # 103
+ "Udieresis", # 104
+ "aacute", # 105
+ "agrave", # 106
+ "acircumflex", # 107
+ "adieresis", # 108
+ "atilde", # 109
+ "aring", # 110
+ "ccedilla", # 111
+ "eacute", # 112
+ "egrave", # 113
+ "ecircumflex", # 114
+ "edieresis", # 115
+ "iacute", # 116
+ "igrave", # 117
+ "icircumflex", # 118
+ "idieresis", # 119
+ "ntilde", # 120
+ "oacute", # 121
+ "ograve", # 122
+ "ocircumflex", # 123
+ "odieresis", # 124
+ "otilde", # 125
+ "uacute", # 126
+ "ugrave", # 127
+ "ucircumflex", # 128
+ "udieresis", # 129
+ "dagger", # 130
+ "degree", # 131
+ "cent", # 132
+ "sterling", # 133
+ "section", # 134
+ "bullet", # 135
+ "paragraph", # 136
+ "germandbls", # 137
+ "registered", # 138
+ "copyright", # 139
+ "trademark", # 140
+ "acute", # 141
+ "dieresis", # 142
+ "notequal", # 143
+ "AE", # 144
+ "Oslash", # 145
+ "infinity", # 146
+ "plusminus", # 147
+ "lessequal", # 148
+ "greaterequal", # 149
+ "yen", # 150
+ "mu", # 151
+ "partialdiff", # 152
+ "summation", # 153
+ "product", # 154
+ "pi", # 155
+ "integral", # 156
+ "ordfeminine", # 157
+ "ordmasculine", # 158
+ "Omega", # 159
+ "ae", # 160
+ "oslash", # 161
+ "questiondown", # 162
+ "exclamdown", # 163
+ "logicalnot", # 164
+ "radical", # 165
+ "florin", # 166
+ "approxequal", # 167
+ "Delta", # 168
+ "guillemotleft", # 169
+ "guillemotright", # 170
+ "ellipsis", # 171
+ "nonbreakingspace", # 172
+ "Agrave", # 173
+ "Atilde", # 174
+ "Otilde", # 175
+ "OE", # 176
+ "oe", # 177
+ "endash", # 178
+ "emdash", # 179
+ "quotedblleft", # 180
+ "quotedblright", # 181
+ "quoteleft", # 182
+ "quoteright", # 183
+ "divide", # 184
+ "lozenge", # 185
+ "ydieresis", # 186
+ "Ydieresis", # 187
+ "fraction", # 188
+ "currency", # 189
+ "guilsinglleft", # 190
+ "guilsinglright", # 191
+ "fi", # 192
+ "fl", # 193
+ "daggerdbl", # 194
+ "periodcentered", # 195
+ "quotesinglbase", # 196
+ "quotedblbase", # 197
+ "perthousand", # 198
+ "Acircumflex", # 199
+ "Ecircumflex", # 200
+ "Aacute", # 201
+ "Edieresis", # 202
+ "Egrave", # 203
+ "Iacute", # 204
+ "Icircumflex", # 205
+ "Idieresis", # 206
+ "Igrave", # 207
+ "Oacute", # 208
+ "Ocircumflex", # 209
+ "apple", # 210
+ "Ograve", # 211
+ "Uacute", # 212
+ "Ucircumflex", # 213
+ "Ugrave", # 214
+ "dotlessi", # 215
+ "circumflex", # 216
+ "tilde", # 217
+ "macron", # 218
+ "breve", # 219
+ "dotaccent", # 220
+ "ring", # 221
+ "cedilla", # 222
+ "hungarumlaut", # 223
+ "ogonek", # 224
+ "caron", # 225
+ "Lslash", # 226
+ "lslash", # 227
+ "Scaron", # 228
+ "scaron", # 229
+ "Zcaron", # 230
+ "zcaron", # 231
+ "brokenbar", # 232
+ "Eth", # 233
+ "eth", # 234
+ "Yacute", # 235
+ "yacute", # 236
+ "Thorn", # 237
+ "thorn", # 238
+ "minus", # 239
+ "multiply", # 240
+ "onesuperior", # 241
+ "twosuperior", # 242
+ "threesuperior", # 243
+ "onehalf", # 244
+ "onequarter", # 245
+ "threequarters", # 246
+ "franc", # 247
+ "Gbreve", # 248
+ "gbreve", # 249
+ "Idotaccent", # 250
+ "Scedilla", # 251
+ "scedilla", # 252
+ "Cacute", # 253
+ "cacute", # 254
+ "Ccaron", # 255
+ "ccaron", # 256
+ "dcroat", # 257
]
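Aside, for orientation (not part of the patch): the list rebuilt above is the standard 258-entry Macintosh glyph order that 'post' table format 2.0 glyph-name indices 0-257 refer to. A minimal lookup sketch, assuming the module keeps exposing the list under the name standardGlyphOrder as the file path suggests:

    # Illustrative only; the import path and variable name are assumptions.
    from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder

    assert standardGlyphOrder[0] == ".notdef"    # index 0 in the list above
    assert standardGlyphOrder[36] == "A"         # index 36
    assert standardGlyphOrder[257] == "dcroat"   # last entry
    assert len(standardGlyphOrder) == 258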
diff --git a/Lib/fontTools/ttLib/tables/B_A_S_E_.py b/Lib/fontTools/ttLib/tables/B_A_S_E_.py
index 9551e2c6..f468a963 100644
--- a/Lib/fontTools/ttLib/tables/B_A_S_E_.py
+++ b/Lib/fontTools/ttLib/tables/B_A_S_E_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_B_A_S_E_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py b/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
index 9197923d..10b4f828 100644
--- a/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
+++ b/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
@@ -28,32 +28,37 @@ smallGlyphMetricsFormat = """
Advance: B
"""
+
class BitmapGlyphMetrics(object):
+ def toXML(self, writer, ttFont):
+ writer.begintag(self.__class__.__name__)
+ writer.newline()
+ for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
+ writer.simpletag(metricName, value=getattr(self, metricName))
+ writer.newline()
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__)
- writer.newline()
- for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
- writer.simpletag(metricName, value=getattr(self, metricName))
- writer.newline()
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- # Make sure this is a metric that is needed by GlyphMetrics.
- if name in metricNames:
- vars(self)[name] = safeEval(attrs['value'])
- else:
- log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__)
+ def fromXML(self, name, attrs, content, ttFont):
+ metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ # Make sure this is a metric that is needed by GlyphMetrics.
+ if name in metricNames:
+ vars(self)[name] = safeEval(attrs["value"])
+ else:
+ log.warning(
+ "unknown name '%s' being ignored in %s.",
+ name,
+ self.__class__.__name__,
+ )
class BigGlyphMetrics(BitmapGlyphMetrics):
- binaryFormat = bigGlyphMetricsFormat
+ binaryFormat = bigGlyphMetricsFormat
+
class SmallGlyphMetrics(BitmapGlyphMetrics):
- binaryFormat = smallGlyphMetricsFormat
+ binaryFormat = smallGlyphMetricsFormat
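A hedged aside (not from the commit): the toXML/fromXML methods above derive the metric field names by parsing the sstruct format string, so the XML element names always track the binary layout. A small sketch of that dependency, assuming sstruct.getformat keeps returning the field names in its second slot exactly as the code above relies on:

    from fontTools.misc import sstruct
    from fontTools.ttLib.tables.BitmapGlyphMetrics import SmallGlyphMetrics

    # getformat()[1] is the list of field names parsed from the format string;
    # "Advance" is the last field of smallGlyphMetricsFormat shown above.
    names = sstruct.getformat(SmallGlyphMetrics.binaryFormat)[1]
    assert "Advance" in names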
diff --git a/Lib/fontTools/ttLib/tables/C_B_D_T_.py b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
index adf5447f..2b87ac86 100644
--- a/Lib/fontTools/ttLib/tables/C_B_D_T_.py
+++ b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
@@ -6,87 +6,98 @@
from fontTools.misc.textTools import bytesjoin
from fontTools.misc import sstruct
from . import E_B_D_T_
-from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
-from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin
+from .BitmapGlyphMetrics import (
+ BigGlyphMetrics,
+ bigGlyphMetricsFormat,
+ SmallGlyphMetrics,
+ smallGlyphMetricsFormat,
+)
+from .E_B_D_T_ import (
+ BitmapGlyph,
+ BitmapPlusSmallMetricsMixin,
+ BitmapPlusBigMetricsMixin,
+)
import struct
+
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
+ # Change the data locator table being referenced.
+ locatorName = "CBLC"
- # Change the data locator table being referenced.
- locatorName = 'CBLC'
+ # Modify the format class accessor for color bitmap use.
+ def getImageFormatClass(self, imageFormat):
+ try:
+ return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
+ except KeyError:
+ return cbdt_bitmap_classes[imageFormat]
- # Modify the format class accessor for color bitmap use.
- def getImageFormatClass(self, imageFormat):
- try:
- return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
- except KeyError:
- return cbdt_bitmap_classes[imageFormat]
# Helper method for removing export features not supported by color bitmaps.
# Write data in the parent class will default to raw if an option is unsupported.
def _removeUnsupportedForColor(dataFunctions):
- dataFunctions = dict(dataFunctions)
- del dataFunctions['row']
- return dataFunctions
+ dataFunctions = dict(dataFunctions)
+ del dataFunctions["row"]
+ return dataFunctions
+
class ColorBitmapGlyph(BitmapGlyph):
+ fileExtension = ".png"
+ xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
- fileExtension = '.png'
- xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ (dataLen,) = struct.unpack(">L", data[:4])
+ data = data[4:]
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- (dataLen,) = struct.unpack(">L", data[:4])
- data = data[4:]
+ # For the image data cut it to the size specified by dataLen.
+        assert dataLen <= len(data), "Data overrun in format 17"
+ self.imageData = data[:dataLen]
- # For the image data cut it to the size specified by dataLen.
- assert dataLen <= len(data), "Data overun in format 17"
- self.imageData = data[:dataLen]
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
+ dataList.append(struct.pack(">L", len(self.imageData)))
+ dataList.append(self.imageData)
+ return bytesjoin(dataList)
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
- dataList.append(struct.pack(">L", len(self.imageData)))
- dataList.append(self.imageData)
- return bytesjoin(dataList)
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ (dataLen,) = struct.unpack(">L", data[:4])
+ data = data[4:]
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- (dataLen,) = struct.unpack(">L", data[:4])
- data = data[4:]
+ # For the image data cut it to the size specified by dataLen.
+        assert dataLen <= len(data), "Data overrun in format 18"
+ self.imageData = data[:dataLen]
- # For the image data cut it to the size specified by dataLen.
- assert dataLen <= len(data), "Data overun in format 18"
- self.imageData = data[:dataLen]
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ dataList.append(struct.pack(">L", len(self.imageData)))
+ dataList.append(self.imageData)
+ return bytesjoin(dataList)
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- dataList.append(struct.pack(">L", len(self.imageData)))
- dataList.append(self.imageData)
- return bytesjoin(dataList)
class cbdt_bitmap_format_19(ColorBitmapGlyph):
+ def decompile(self):
+ (dataLen,) = struct.unpack(">L", self.data[:4])
+ data = self.data[4:]
- def decompile(self):
- (dataLen,) = struct.unpack(">L", self.data[:4])
- data = self.data[4:]
+        assert dataLen <= len(data), "Data overrun in format 19"
+ self.imageData = data[:dataLen]
- assert dataLen <= len(data), "Data overun in format 19"
- self.imageData = data[:dataLen]
+ def compile(self, ttFont):
+ return struct.pack(">L", len(self.imageData)) + self.imageData
- def compile(self, ttFont):
- return struct.pack(">L", len(self.imageData)) + self.imageData
# Dict for CBDT extended formats.
cbdt_bitmap_classes = {
- 17: cbdt_bitmap_format_17,
- 18: cbdt_bitmap_format_18,
- 19: cbdt_bitmap_format_19,
+ 17: cbdt_bitmap_format_17,
+ 18: cbdt_bitmap_format_18,
+ 19: cbdt_bitmap_format_19,
}
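Illustrative aside (not part of the patch): getImageFormatClass defers to the inherited EBDT lookup first and only falls back to the color-only classes registered in cbdt_bitmap_classes. A quick sketch of that dispatch, on the assumption that formats 17-19 stay absent from the plain EBDT class map:

    from fontTools.ttLib.tables.C_B_D_T_ import table_C_B_D_T_, cbdt_bitmap_format_17

    cbdt = table_C_B_D_T_()
    # Format 17 is a CBDT-only (PNG) format, so the EBDT lookup raises KeyError
    # and the fallback dict above supplies the class.
    assert cbdt.getImageFormatClass(17) is cbdt_bitmap_format_17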
diff --git a/Lib/fontTools/ttLib/tables/C_B_L_C_.py b/Lib/fontTools/ttLib/tables/C_B_L_C_.py
index 2f785710..fc3974ec 100644
--- a/Lib/fontTools/ttLib/tables/C_B_L_C_.py
+++ b/Lib/fontTools/ttLib/tables/C_B_L_C_.py
@@ -4,6 +4,6 @@
from . import E_B_L_C_
-class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
- dependencies = ['CBDT']
+class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
+ dependencies = ["CBDT"]
diff --git a/Lib/fontTools/ttLib/tables/C_F_F_.py b/Lib/fontTools/ttLib/tables/C_F_F_.py
index d12b89d2..c231599e 100644
--- a/Lib/fontTools/ttLib/tables/C_F_F_.py
+++ b/Lib/fontTools/ttLib/tables/C_F_F_.py
@@ -4,43 +4,43 @@ from . import DefaultTable
class table_C_F_F_(DefaultTable.DefaultTable):
-
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.cff = cffLib.CFFFontSet()
- self._gaveGlyphOrder = False
-
- def decompile(self, data, otFont):
- self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
- assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
-
- def compile(self, otFont):
- f = BytesIO()
- self.cff.compile(f, otFont, isCFF2=False)
- return f.getvalue()
-
- def haveGlyphNames(self):
- if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
- return False # CID-keyed font
- else:
- return True
-
- def getGlyphOrder(self):
- if self._gaveGlyphOrder:
- from fontTools import ttLib
- raise ttLib.TTLibError("illegal use of getGlyphOrder()")
- self._gaveGlyphOrder = True
- return self.cff[self.cff.fontNames[0]].getGlyphOrder()
-
- def setGlyphOrder(self, glyphOrder):
- pass
- # XXX
- #self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)
-
- def toXML(self, writer, otFont):
- self.cff.toXML(writer)
-
- def fromXML(self, name, attrs, content, otFont):
- if not hasattr(self, "cff"):
- self.cff = cffLib.CFFFontSet()
- self.cff.fromXML(name, attrs, content, otFont)
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.cff = cffLib.CFFFontSet()
+ self._gaveGlyphOrder = False
+
+ def decompile(self, data, otFont):
+ self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
+ assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
+
+ def compile(self, otFont):
+ f = BytesIO()
+ self.cff.compile(f, otFont, isCFF2=False)
+ return f.getvalue()
+
+ def haveGlyphNames(self):
+ if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
+ return False # CID-keyed font
+ else:
+ return True
+
+ def getGlyphOrder(self):
+ if self._gaveGlyphOrder:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("illegal use of getGlyphOrder()")
+ self._gaveGlyphOrder = True
+ return self.cff[self.cff.fontNames[0]].getGlyphOrder()
+
+ def setGlyphOrder(self, glyphOrder):
+ pass
+ # XXX
+ # self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)
+
+ def toXML(self, writer, otFont):
+ self.cff.toXML(writer)
+
+ def fromXML(self, name, attrs, content, otFont):
+ if not hasattr(self, "cff"):
+ self.cff = cffLib.CFFFontSet()
+ self.cff.fromXML(name, attrs, content, otFont)
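For reference (not from the commit): the table wraps a cffLib.CFFFontSet and asserts it contains exactly one font, so callers reach the top dict through fontNames[0] just as the methods above do. A hedged usage sketch; the font path is hypothetical:

    from fontTools.ttLib import TTFont

    font = TTFont("MyCFFFont.otf")       # hypothetical input file
    cffTable = font["CFF "]              # note the trailing space in the table tag
    topDict = cffTable.cff[cffTable.cff.fontNames[0]]
    print(cffTable.haveGlyphNames())     # False for CID-keyed (ROS) fonts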
diff --git a/Lib/fontTools/ttLib/tables/C_F_F__2.py b/Lib/fontTools/ttLib/tables/C_F_F__2.py
index 6217ebba..edbb0b92 100644
--- a/Lib/fontTools/ttLib/tables/C_F_F__2.py
+++ b/Lib/fontTools/ttLib/tables/C_F_F__2.py
@@ -3,7 +3,6 @@ from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_
class table_C_F_F__2(table_C_F_F_):
-
def decompile(self, data, otFont):
self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
diff --git a/Lib/fontTools/ttLib/tables/C_O_L_R_.py b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
index 3528bf5b..2f03ec05 100644
--- a/Lib/fontTools/ttLib/tables/C_O_L_R_.py
+++ b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
@@ -8,153 +8,151 @@ from . import DefaultTable
class table_C_O_L_R_(DefaultTable.DefaultTable):
- """ This table is structured so that you can treat it like a dictionary keyed by glyph name.
-
- ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
-
- ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
- """
-
- @staticmethod
- def _decompileColorLayersV0(table):
- if not table.LayerRecordArray:
- return {}
- colorLayerLists = {}
- layerRecords = table.LayerRecordArray.LayerRecord
- numLayerRecords = len(layerRecords)
- for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
- baseGlyph = baseRec.BaseGlyph
- firstLayerIndex = baseRec.FirstLayerIndex
- numLayers = baseRec.NumLayers
- assert (firstLayerIndex + numLayers <= numLayerRecords)
- layers = []
- for i in range(firstLayerIndex, firstLayerIndex+numLayers):
- layerRec = layerRecords[i]
- layers.append(
- LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex)
- )
- colorLayerLists[baseGlyph] = layers
- return colorLayerLists
-
- def _toOTTable(self, ttFont):
- from . import otTables
- from fontTools.colorLib.builder import populateCOLRv0
-
- tableClass = getattr(otTables, self.tableTag)
- table = tableClass()
- table.Version = self.version
-
- populateCOLRv0(
- table,
- {
- baseGlyph: [(layer.name, layer.colorID) for layer in layers]
- for baseGlyph, layers in self.ColorLayers.items()
- },
- glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
- )
- return table
-
- def decompile(self, data, ttFont):
- from .otBase import OTTableReader
- from . import otTables
-
- # We use otData to decompile, but we adapt the decompiled otTables to the
- # existing COLR v0 API for backward compatibility.
- reader = OTTableReader(data, tableTag=self.tableTag)
- tableClass = getattr(otTables, self.tableTag)
- table = tableClass()
- table.decompile(reader, ttFont)
-
- self.version = table.Version
- if self.version == 0:
- self.ColorLayers = self._decompileColorLayersV0(table)
- else:
- # for new versions, keep the raw otTables around
- self.table = table
-
- def compile(self, ttFont):
- from .otBase import OTTableWriter
-
- if hasattr(self, "table"):
- table = self.table
- else:
- table = self._toOTTable(ttFont)
-
- writer = OTTableWriter(tableTag=self.tableTag)
- table.compile(writer, ttFont)
- return writer.getAllData()
-
- def toXML(self, writer, ttFont):
- if hasattr(self, "table"):
- self.table.toXML2(writer, ttFont)
- else:
- writer.simpletag("version", value=self.version)
- writer.newline()
- for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
- writer.begintag("ColorGlyph", name=baseGlyph)
- writer.newline()
- for layer in self.ColorLayers[baseGlyph]:
- layer.toXML(writer, ttFont)
- writer.endtag("ColorGlyph")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version": # old COLR v0 API
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "ColorGlyph":
- if not hasattr(self, "ColorLayers"):
- self.ColorLayers = {}
- glyphName = attrs["name"]
- for element in content:
- if isinstance(element, str):
- continue
- layers = []
- for element in content:
- if isinstance(element, str):
- continue
- layer = LayerRecord()
- layer.fromXML(element[0], element[1], element[2], ttFont)
- layers.append (layer)
- self.ColorLayers[glyphName] = layers
- else: # new COLR v1 API
- from . import otTables
-
- if not hasattr(self, "table"):
- tableClass = getattr(otTables, self.tableTag)
- self.table = tableClass()
- self.table.fromXML(name, attrs, content, ttFont)
- self.table.populateDefaults()
- self.version = self.table.Version
-
- def __getitem__(self, glyphName):
- if not isinstance(glyphName, str):
- raise TypeError(f"expected str, found {type(glyphName).__name__}")
- return self.ColorLayers[glyphName]
-
- def __setitem__(self, glyphName, value):
- if not isinstance(glyphName, str):
- raise TypeError(f"expected str, found {type(glyphName).__name__}")
- if value is not None:
- self.ColorLayers[glyphName] = value
- elif glyphName in self.ColorLayers:
- del self.ColorLayers[glyphName]
-
- def __delitem__(self, glyphName):
- del self.ColorLayers[glyphName]
+ """This table is structured so that you can treat it like a dictionary keyed by glyph name.
+
+ ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
+
+ ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
+ """
+
+ @staticmethod
+ def _decompileColorLayersV0(table):
+ if not table.LayerRecordArray:
+ return {}
+ colorLayerLists = {}
+ layerRecords = table.LayerRecordArray.LayerRecord
+ numLayerRecords = len(layerRecords)
+ for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
+ baseGlyph = baseRec.BaseGlyph
+ firstLayerIndex = baseRec.FirstLayerIndex
+ numLayers = baseRec.NumLayers
+ assert firstLayerIndex + numLayers <= numLayerRecords
+ layers = []
+ for i in range(firstLayerIndex, firstLayerIndex + numLayers):
+ layerRec = layerRecords[i]
+ layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
+ colorLayerLists[baseGlyph] = layers
+ return colorLayerLists
+
+ def _toOTTable(self, ttFont):
+ from . import otTables
+ from fontTools.colorLib.builder import populateCOLRv0
+
+ tableClass = getattr(otTables, self.tableTag)
+ table = tableClass()
+ table.Version = self.version
+
+ populateCOLRv0(
+ table,
+ {
+ baseGlyph: [(layer.name, layer.colorID) for layer in layers]
+ for baseGlyph, layers in self.ColorLayers.items()
+ },
+ glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
+ )
+ return table
+
+ def decompile(self, data, ttFont):
+ from .otBase import OTTableReader
+ from . import otTables
+
+ # We use otData to decompile, but we adapt the decompiled otTables to the
+ # existing COLR v0 API for backward compatibility.
+ reader = OTTableReader(data, tableTag=self.tableTag)
+ tableClass = getattr(otTables, self.tableTag)
+ table = tableClass()
+ table.decompile(reader, ttFont)
+
+ self.version = table.Version
+ if self.version == 0:
+ self.ColorLayers = self._decompileColorLayersV0(table)
+ else:
+ # for new versions, keep the raw otTables around
+ self.table = table
+
+ def compile(self, ttFont):
+ from .otBase import OTTableWriter
+
+ if hasattr(self, "table"):
+ table = self.table
+ else:
+ table = self._toOTTable(ttFont)
+
+ writer = OTTableWriter(tableTag=self.tableTag)
+ table.compile(writer, ttFont)
+ return writer.getAllData()
+
+ def toXML(self, writer, ttFont):
+ if hasattr(self, "table"):
+ self.table.toXML2(writer, ttFont)
+ else:
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
+ writer.begintag("ColorGlyph", name=baseGlyph)
+ writer.newline()
+ for layer in self.ColorLayers[baseGlyph]:
+ layer.toXML(writer, ttFont)
+ writer.endtag("ColorGlyph")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version": # old COLR v0 API
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "ColorGlyph":
+ if not hasattr(self, "ColorLayers"):
+ self.ColorLayers = {}
+ glyphName = attrs["name"]
+ for element in content:
+ if isinstance(element, str):
+ continue
+ layers = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ layer = LayerRecord()
+ layer.fromXML(element[0], element[1], element[2], ttFont)
+ layers.append(layer)
+ self.ColorLayers[glyphName] = layers
+ else: # new COLR v1 API
+ from . import otTables
+
+ if not hasattr(self, "table"):
+ tableClass = getattr(otTables, self.tableTag)
+ self.table = tableClass()
+ self.table.fromXML(name, attrs, content, ttFont)
+ self.table.populateDefaults()
+ self.version = self.table.Version
+
+ def __getitem__(self, glyphName):
+ if not isinstance(glyphName, str):
+ raise TypeError(f"expected str, found {type(glyphName).__name__}")
+ return self.ColorLayers[glyphName]
+
+ def __setitem__(self, glyphName, value):
+ if not isinstance(glyphName, str):
+ raise TypeError(f"expected str, found {type(glyphName).__name__}")
+ if value is not None:
+ self.ColorLayers[glyphName] = value
+ elif glyphName in self.ColorLayers:
+ del self.ColorLayers[glyphName]
+
+ def __delitem__(self, glyphName):
+ del self.ColorLayers[glyphName]
-class LayerRecord(object):
-
- def __init__(self, name=None, colorID=None):
- self.name = name
- self.colorID = colorID
- def toXML(self, writer, ttFont):
- writer.simpletag("layer", name=self.name, colorID=self.colorID)
- writer.newline()
-
- def fromXML(self, eltname, attrs, content, ttFont):
- for (name, value) in attrs.items():
- if name == "name":
- setattr(self, name, value)
- else:
- setattr(self, name, safeEval(value))
+class LayerRecord(object):
+ def __init__(self, name=None, colorID=None):
+ self.name = name
+ self.colorID = colorID
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("layer", name=self.name, colorID=self.colorID)
+ writer.newline()
+
+ def fromXML(self, eltname, attrs, content, ttFont):
+ for name, value in attrs.items():
+ if name == "name":
+ setattr(self, name, value)
+ else:
+ setattr(self, name, safeEval(value))
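Illustrative aside (not from the commit): the class docstring above describes the dict-like COLR v0 interface, which this sketch exercises; the font path and glyph name are hypothetical:

    from fontTools.ttLib import TTFont

    font = TTFont("MyColorFont.ttf")              # hypothetical input file
    colr = font["COLR"]
    if colr.version == 0 and "A" in colr.ColorLayers:
        for layer in colr["A"]:                   # list of LayerRecord objects
            print(layer.name, layer.colorID)      # layer glyph and CPAL entry index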
diff --git a/Lib/fontTools/ttLib/tables/C_P_A_L_.py b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
index 1ad342f1..9fb2074a 100644
--- a/Lib/fontTools/ttLib/tables/C_P_A_L_.py
+++ b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
@@ -11,250 +11,286 @@ import sys
class table_C_P_A_L_(DefaultTable.DefaultTable):
+ NO_NAME_ID = 0xFFFF
+ DEFAULT_PALETTE_TYPE = 0
- NO_NAME_ID = 0xFFFF
- DEFAULT_PALETTE_TYPE = 0
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.palettes = []
+ self.paletteTypes = []
+ self.paletteLabels = []
+ self.paletteEntryLabels = []
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.palettes = []
- self.paletteTypes = []
- self.paletteLabels = []
- self.paletteEntryLabels = []
+ def decompile(self, data, ttFont):
+ (
+ self.version,
+ self.numPaletteEntries,
+ numPalettes,
+ numColorRecords,
+ goffsetFirstColorRecord,
+ ) = struct.unpack(">HHHHL", data[:12])
+ assert (
+ self.version <= 1
+ ), "Version of CPAL table is higher than I know how to handle"
+ self.palettes = []
+ pos = 12
+ for i in range(numPalettes):
+ startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
+ assert startIndex + self.numPaletteEntries <= numColorRecords
+ pos += 2
+ palette = []
+ ppos = goffsetFirstColorRecord + startIndex * 4
+ for j in range(self.numPaletteEntries):
+ palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4])))
+ ppos += 4
+ self.palettes.append(palette)
+ if self.version == 0:
+ offsetToPaletteTypeArray = 0
+ offsetToPaletteLabelArray = 0
+ offsetToPaletteEntryLabelArray = 0
+ else:
+ pos = 12 + numPalettes * 2
+ (
+ offsetToPaletteTypeArray,
+ offsetToPaletteLabelArray,
+ offsetToPaletteEntryLabelArray,
+ ) = struct.unpack(">LLL", data[pos : pos + 12])
+ self.paletteTypes = self._decompileUInt32Array(
+ data,
+ offsetToPaletteTypeArray,
+ numPalettes,
+ default=self.DEFAULT_PALETTE_TYPE,
+ )
+ self.paletteLabels = self._decompileUInt16Array(
+ data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
+ )
+ self.paletteEntryLabels = self._decompileUInt16Array(
+ data,
+ offsetToPaletteEntryLabelArray,
+ self.numPaletteEntries,
+ default=self.NO_NAME_ID,
+ )
- def decompile(self, data, ttFont):
- self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12])
- assert (self.version <= 1), "Version of CPAL table is higher than I know how to handle"
- self.palettes = []
- pos = 12
- for i in range(numPalettes):
- startIndex = struct.unpack(">H", data[pos:pos+2])[0]
- assert (startIndex + self.numPaletteEntries <= numColorRecords)
- pos += 2
- palette = []
- ppos = goffsetFirstColorRecord + startIndex * 4
- for j in range(self.numPaletteEntries):
- palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) )
- ppos += 4
- self.palettes.append(palette)
- if self.version == 0:
- offsetToPaletteTypeArray = 0
- offsetToPaletteLabelArray = 0
- offsetToPaletteEntryLabelArray = 0
- else:
- pos = 12 + numPalettes * 2
- (offsetToPaletteTypeArray, offsetToPaletteLabelArray,
- offsetToPaletteEntryLabelArray) = (
- struct.unpack(">LLL", data[pos:pos+12]))
- self.paletteTypes = self._decompileUInt32Array(
- data, offsetToPaletteTypeArray, numPalettes,
- default=self.DEFAULT_PALETTE_TYPE)
- self.paletteLabels = self._decompileUInt16Array(
- data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID)
- self.paletteEntryLabels = self._decompileUInt16Array(
- data, offsetToPaletteEntryLabelArray,
- self.numPaletteEntries, default=self.NO_NAME_ID)
+ def _decompileUInt16Array(self, data, offset, numElements, default=0):
+ if offset == 0:
+ return [default] * numElements
+ result = array.array("H", data[offset : offset + 2 * numElements])
+ if sys.byteorder != "big":
+ result.byteswap()
+ assert len(result) == numElements, result
+ return result.tolist()
- def _decompileUInt16Array(self, data, offset, numElements, default=0):
- if offset == 0:
- return [default] * numElements
- result = array.array("H", data[offset : offset + 2 * numElements])
- if sys.byteorder != "big": result.byteswap()
- assert len(result) == numElements, result
- return result.tolist()
+ def _decompileUInt32Array(self, data, offset, numElements, default=0):
+ if offset == 0:
+ return [default] * numElements
+ result = array.array("I", data[offset : offset + 4 * numElements])
+ if sys.byteorder != "big":
+ result.byteswap()
+ assert len(result) == numElements, result
+ return result.tolist()
- def _decompileUInt32Array(self, data, offset, numElements, default=0):
- if offset == 0:
- return [default] * numElements
- result = array.array("I", data[offset : offset + 4 * numElements])
- if sys.byteorder != "big": result.byteswap()
- assert len(result) == numElements, result
- return result.tolist()
+ def compile(self, ttFont):
+ colorRecordIndices, colorRecords = self._compileColorRecords()
+ paletteTypes = self._compilePaletteTypes()
+ paletteLabels = self._compilePaletteLabels()
+ paletteEntryLabels = self._compilePaletteEntryLabels()
+ numColorRecords = len(colorRecords) // 4
+ offsetToFirstColorRecord = 12 + len(colorRecordIndices)
+ if self.version >= 1:
+ offsetToFirstColorRecord += 12
+ header = struct.pack(
+ ">HHHHL",
+ self.version,
+ self.numPaletteEntries,
+ len(self.palettes),
+ numColorRecords,
+ offsetToFirstColorRecord,
+ )
+ if self.version == 0:
+ dataList = [header, colorRecordIndices, colorRecords]
+ else:
+ pos = offsetToFirstColorRecord + len(colorRecords)
+ if len(paletteTypes) == 0:
+ offsetToPaletteTypeArray = 0
+ else:
+ offsetToPaletteTypeArray = pos
+ pos += len(paletteTypes)
+ if len(paletteLabels) == 0:
+ offsetToPaletteLabelArray = 0
+ else:
+ offsetToPaletteLabelArray = pos
+ pos += len(paletteLabels)
+ if len(paletteEntryLabels) == 0:
+ offsetToPaletteEntryLabelArray = 0
+ else:
+ offsetToPaletteEntryLabelArray = pos
+ pos += len(paletteLabels)
+ header1 = struct.pack(
+ ">LLL",
+ offsetToPaletteTypeArray,
+ offsetToPaletteLabelArray,
+ offsetToPaletteEntryLabelArray,
+ )
+ dataList = [
+ header,
+ colorRecordIndices,
+ header1,
+ colorRecords,
+ paletteTypes,
+ paletteLabels,
+ paletteEntryLabels,
+ ]
+ return bytesjoin(dataList)
- def compile(self, ttFont):
- colorRecordIndices, colorRecords = self._compileColorRecords()
- paletteTypes = self._compilePaletteTypes()
- paletteLabels = self._compilePaletteLabels()
- paletteEntryLabels = self._compilePaletteEntryLabels()
- numColorRecords = len(colorRecords) // 4
- offsetToFirstColorRecord = 12 + len(colorRecordIndices)
- if self.version >= 1:
- offsetToFirstColorRecord += 12
- header = struct.pack(">HHHHL", self.version,
- self.numPaletteEntries, len(self.palettes),
- numColorRecords, offsetToFirstColorRecord)
- if self.version == 0:
- dataList = [header, colorRecordIndices, colorRecords]
- else:
- pos = offsetToFirstColorRecord + len(colorRecords)
- if len(paletteTypes) == 0:
- offsetToPaletteTypeArray = 0
- else:
- offsetToPaletteTypeArray = pos
- pos += len(paletteTypes)
- if len(paletteLabels) == 0:
- offsetToPaletteLabelArray = 0
- else:
- offsetToPaletteLabelArray = pos
- pos += len(paletteLabels)
- if len(paletteEntryLabels) == 0:
- offsetToPaletteEntryLabelArray = 0
- else:
- offsetToPaletteEntryLabelArray = pos
- pos += len(paletteLabels)
- header1 = struct.pack(">LLL",
- offsetToPaletteTypeArray,
- offsetToPaletteLabelArray,
- offsetToPaletteEntryLabelArray)
- dataList = [header, colorRecordIndices, header1,
- colorRecords, paletteTypes, paletteLabels,
- paletteEntryLabels]
- return bytesjoin(dataList)
+ def _compilePalette(self, palette):
+ assert len(palette) == self.numPaletteEntries
+ pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
+ return bytesjoin([pack(color) for color in palette])
- def _compilePalette(self, palette):
- assert(len(palette) == self.numPaletteEntries)
- pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
- return bytesjoin([pack(color) for color in palette])
+ def _compileColorRecords(self):
+ colorRecords, colorRecordIndices, pool = [], [], {}
+ for palette in self.palettes:
+ packedPalette = self._compilePalette(palette)
+ if packedPalette in pool:
+ index = pool[packedPalette]
+ else:
+ index = len(colorRecords)
+ colorRecords.append(packedPalette)
+ pool[packedPalette] = index
+ colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
+ return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)
- def _compileColorRecords(self):
- colorRecords, colorRecordIndices, pool = [], [], {}
- for palette in self.palettes:
- packedPalette = self._compilePalette(palette)
- if packedPalette in pool:
- index = pool[packedPalette]
- else:
- index = len(colorRecords)
- colorRecords.append(packedPalette)
- pool[packedPalette] = index
- colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
- return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)
+ def _compilePaletteTypes(self):
+ if self.version == 0 or not any(self.paletteTypes):
+ return b""
+ assert len(self.paletteTypes) == len(self.palettes)
+ result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
+ assert len(result) == 4 * len(self.palettes)
+ return result
- def _compilePaletteTypes(self):
- if self.version == 0 or not any(self.paletteTypes):
- return b''
- assert len(self.paletteTypes) == len(self.palettes)
- result = bytesjoin([struct.pack(">I", ptype)
- for ptype in self.paletteTypes])
- assert len(result) == 4 * len(self.palettes)
- return result
+ def _compilePaletteLabels(self):
+ if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
+ return b""
+ assert len(self.paletteLabels) == len(self.palettes)
+ result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
+ assert len(result) == 2 * len(self.palettes)
+ return result
- def _compilePaletteLabels(self):
- if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
- return b''
- assert len(self.paletteLabels) == len(self.palettes)
- result = bytesjoin([struct.pack(">H", label)
- for label in self.paletteLabels])
- assert len(result) == 2 * len(self.palettes)
- return result
+ def _compilePaletteEntryLabels(self):
+ if self.version == 0 or all(
+ l == self.NO_NAME_ID for l in self.paletteEntryLabels
+ ):
+ return b""
+ assert len(self.paletteEntryLabels) == self.numPaletteEntries
+ result = bytesjoin(
+ [struct.pack(">H", label) for label in self.paletteEntryLabels]
+ )
+ assert len(result) == 2 * self.numPaletteEntries
+ return result
- def _compilePaletteEntryLabels(self):
- if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
- return b''
- assert len(self.paletteEntryLabels) == self.numPaletteEntries
- result = bytesjoin([struct.pack(">H", label)
- for label in self.paletteEntryLabels])
- assert len(result) == 2 * self.numPaletteEntries
- return result
+ def toXML(self, writer, ttFont):
+ numPalettes = len(self.palettes)
+ paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
+ paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
+ writer.newline()
+ for index, palette in enumerate(self.palettes):
+ attrs = {"index": index}
+ paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
+ paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
+ if self.version > 0 and paletteLabel != self.NO_NAME_ID:
+ attrs["label"] = paletteLabel
+ if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
+ attrs["type"] = paletteType
+ writer.begintag("palette", **attrs)
+ writer.newline()
+ if (
+ self.version > 0
+ and paletteLabel != self.NO_NAME_ID
+ and ttFont
+ and "name" in ttFont
+ ):
+ name = ttFont["name"].getDebugName(paletteLabel)
+ if name is not None:
+ writer.comment(name)
+ writer.newline()
+ assert len(palette) == self.numPaletteEntries
+ for cindex, color in enumerate(palette):
+ color.toXML(writer, ttFont, cindex)
+ writer.endtag("palette")
+ writer.newline()
+ if self.version > 0 and not all(
+ l == self.NO_NAME_ID for l in self.paletteEntryLabels
+ ):
+ writer.begintag("paletteEntryLabels")
+ writer.newline()
+ for index, label in enumerate(self.paletteEntryLabels):
+ if label != self.NO_NAME_ID:
+ writer.simpletag("label", index=index, value=label)
+ if self.version > 0 and label and ttFont and "name" in ttFont:
+ name = ttFont["name"].getDebugName(label)
+ if name is not None:
+ writer.comment(name)
+ writer.newline()
+ writer.endtag("paletteEntryLabels")
+ writer.newline()
- def toXML(self, writer, ttFont):
- numPalettes = len(self.palettes)
- paletteLabels = {i: nameID
- for (i, nameID) in enumerate(self.paletteLabels)}
- paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.simpletag("numPaletteEntries",
- value=self.numPaletteEntries)
- writer.newline()
- for index, palette in enumerate(self.palettes):
- attrs = {"index": index}
- paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
- paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
- if self.version > 0 and paletteLabel != self.NO_NAME_ID:
- attrs["label"] = paletteLabel
- if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
- attrs["type"] = paletteType
- writer.begintag("palette", **attrs)
- writer.newline()
- if (self.version > 0 and paletteLabel != self.NO_NAME_ID and
- ttFont and "name" in ttFont):
- name = ttFont["name"].getDebugName(paletteLabel)
- if name is not None:
- writer.comment(name)
- writer.newline()
- assert(len(palette) == self.numPaletteEntries)
- for cindex, color in enumerate(palette):
- color.toXML(writer, ttFont, cindex)
- writer.endtag("palette")
- writer.newline()
- if self.version > 0 and not all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
- writer.begintag("paletteEntryLabels")
- writer.newline()
- for index, label in enumerate(self.paletteEntryLabels):
- if label != self.NO_NAME_ID:
- writer.simpletag("label", index=index, value=label)
- if (self.version > 0 and label and ttFont and "name" in ttFont):
- name = ttFont["name"].getDebugName(label)
- if name is not None:
- writer.comment(name)
- writer.newline()
- writer.endtag("paletteEntryLabels")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "palette":
- self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
- self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
- palette = []
- for element in content:
- if isinstance(element, str):
- continue
- attrs = element[1]
- color = Color.fromHex(attrs["value"])
- palette.append(color)
- self.palettes.append(palette)
- elif name == "paletteEntryLabels":
- colorLabels = {}
- for element in content:
- if isinstance(element, str):
- continue
- elementName, elementAttr, _ = element
- if elementName == "label":
- labelIndex = safeEval(elementAttr["index"])
- nameID = safeEval(elementAttr["value"])
- colorLabels[labelIndex] = nameID
- self.paletteEntryLabels = [
- colorLabels.get(i, self.NO_NAME_ID)
- for i in range(self.numPaletteEntries)]
- elif "value" in attrs:
- value = safeEval(attrs["value"])
- setattr(self, name, value)
- if name == "numPaletteEntries":
- self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "palette":
+ self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
+ self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
+ palette = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ attrs = element[1]
+ color = Color.fromHex(attrs["value"])
+ palette.append(color)
+ self.palettes.append(palette)
+ elif name == "paletteEntryLabels":
+ colorLabels = {}
+ for element in content:
+ if isinstance(element, str):
+ continue
+ elementName, elementAttr, _ = element
+ if elementName == "label":
+ labelIndex = safeEval(elementAttr["index"])
+ nameID = safeEval(elementAttr["value"])
+ colorLabels[labelIndex] = nameID
+ self.paletteEntryLabels = [
+ colorLabels.get(i, self.NO_NAME_ID)
+ for i in range(self.numPaletteEntries)
+ ]
+ elif "value" in attrs:
+ value = safeEval(attrs["value"])
+ setattr(self, name, value)
+ if name == "numPaletteEntries":
+ self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
class Color(namedtuple("Color", "blue green red alpha")):
+ def hex(self):
+ return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
- def hex(self):
- return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
-
- def __repr__(self):
- return self.hex()
+ def __repr__(self):
+ return self.hex()
- def toXML(self, writer, ttFont, index=None):
- writer.simpletag("color", value=self.hex(), index=index)
- writer.newline()
+ def toXML(self, writer, ttFont, index=None):
+ writer.simpletag("color", value=self.hex(), index=index)
+ writer.newline()
- @classmethod
- def fromHex(cls, value):
- if value[0] == '#':
- value = value[1:]
- red = int(value[0:2], 16)
- green = int(value[2:4], 16)
- blue = int(value[4:6], 16)
- alpha = int(value[6:8], 16) if len (value) >= 8 else 0xFF
- return cls(red=red, green=green, blue=blue, alpha=alpha)
+ @classmethod
+ def fromHex(cls, value):
+ if value[0] == "#":
+ value = value[1:]
+ red = int(value[0:2], 16)
+ green = int(value[2:4], 16)
+ blue = int(value[4:6], 16)
+ alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF
+ return cls(red=red, green=green, blue=blue, alpha=alpha)
- @classmethod
- def fromRGBA(cls, red, green, blue, alpha):
- return cls(red=red, green=green, blue=blue, alpha=alpha)
+ @classmethod
+ def fromRGBA(cls, red, green, blue, alpha):
+ return cls(red=red, green=green, blue=blue, alpha=alpha)
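A small aside (not part of the patch): a quick round trip through the Color helpers shown above, with arbitrary values:

    from fontTools.ttLib.tables.C_P_A_L_ import Color

    c = Color.fromHex("#FFCC00")          # alpha defaults to 0xFF when omitted
    assert c.hex() == "#FFCC00FF"
    assert (c.red, c.green, c.blue, c.alpha) == (0xFF, 0xCC, 0x00, 0xFF)
    # The underlying namedtuple field order is (blue, green, red, alpha),
    # matching the BGRA byte order the table packs on disk.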
diff --git a/Lib/fontTools/ttLib/tables/D_S_I_G_.py b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
index 02fddee6..d902a290 100644
--- a/Lib/fontTools/ttLib/tables/D_S_I_G_.py
+++ b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
@@ -37,93 +37,115 @@ DSIG_SignatureBlockFormat = """
# on compilation with no padding whatsoever.
#
+
class table_D_S_I_G_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
+ assert self.ulVersion == 1, "DSIG ulVersion must be 1"
+ assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
+ self.signatureRecords = sigrecs = []
+ for n in range(self.usNumSigs):
+ sigrec, newData = sstruct.unpack2(
+ DSIG_SignatureFormat, newData, SignatureRecord()
+ )
+ assert sigrec.ulFormat == 1, (
+ "DSIG signature record #%d ulFormat must be 1" % n
+ )
+ sigrecs.append(sigrec)
+ for sigrec in sigrecs:
+ dummy, newData = sstruct.unpack2(
+ DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
+ )
+ assert sigrec.usReserved1 == 0, (
+ "DSIG signature record #%d usReserverd1 must be 0" % n
+ )
+ assert sigrec.usReserved2 == 0, (
+ "DSIG signature record #%d usReserverd2 must be 0" % n
+ )
+ sigrec.pkcs7 = newData[: sigrec.cbSignature]
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
- assert self.ulVersion == 1, "DSIG ulVersion must be 1"
- assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
- self.signatureRecords = sigrecs = []
- for n in range(self.usNumSigs):
- sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
- assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
- sigrecs.append(sigrec)
- for sigrec in sigrecs:
- dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
- assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n
- assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n
- sigrec.pkcs7 = newData[:sigrec.cbSignature]
+ def compile(self, ttFont):
+ packed = sstruct.pack(DSIG_HeaderFormat, self)
+ headers = [packed]
+ offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
+ data = []
+ for sigrec in self.signatureRecords:
+ # first pack signature block
+ sigrec.cbSignature = len(sigrec.pkcs7)
+ packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
+ data.append(packed)
+ # update redundant length field
+ sigrec.ulLength = len(packed)
+ # update running table offset
+ sigrec.ulOffset = offset
+ headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
+ offset += sigrec.ulLength
+ if offset % 2:
+ # Pad to even bytes
+ data.append(b"\0")
+ return bytesjoin(headers + data)
- def compile(self, ttFont):
- packed = sstruct.pack(DSIG_HeaderFormat, self)
- headers = [packed]
- offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
- data = []
- for sigrec in self.signatureRecords:
- # first pack signature block
- sigrec.cbSignature = len(sigrec.pkcs7)
- packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
- data.append(packed)
- # update redundant length field
- sigrec.ulLength = len(packed)
- # update running table offset
- sigrec.ulOffset = offset
- headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
- offset += sigrec.ulLength
- if offset % 2:
- # Pad to even bytes
- data.append(b'\0')
- return bytesjoin(headers+data)
+ def toXML(self, xmlWriter, ttFont):
+ xmlWriter.comment(
+ "note that the Digital Signature will be invalid after recompilation!"
+ )
+ xmlWriter.newline()
+ xmlWriter.simpletag(
+ "tableHeader",
+ version=self.ulVersion,
+ numSigs=self.usNumSigs,
+ flag="0x%X" % self.usFlag,
+ )
+ for sigrec in self.signatureRecords:
+ xmlWriter.newline()
+ sigrec.toXML(xmlWriter, ttFont)
+ xmlWriter.newline()
- def toXML(self, xmlWriter, ttFont):
- xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
- xmlWriter.newline()
- xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
- for sigrec in self.signatureRecords:
- xmlWriter.newline()
- sigrec.toXML(xmlWriter, ttFont)
- xmlWriter.newline()
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableHeader":
+ self.signatureRecords = []
+ self.ulVersion = safeEval(attrs["version"])
+ self.usNumSigs = safeEval(attrs["numSigs"])
+ self.usFlag = safeEval(attrs["flag"])
+ return
+ if name == "SignatureRecord":
+ sigrec = SignatureRecord()
+ sigrec.fromXML(name, attrs, content, ttFont)
+ self.signatureRecords.append(sigrec)
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableHeader":
- self.signatureRecords = []
- self.ulVersion = safeEval(attrs["version"])
- self.usNumSigs = safeEval(attrs["numSigs"])
- self.usFlag = safeEval(attrs["flag"])
- return
- if name == "SignatureRecord":
- sigrec = SignatureRecord()
- sigrec.fromXML(name, attrs, content, ttFont)
- self.signatureRecords.append(sigrec)
-pem_spam = lambda l, spam = {
- "-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True
+pem_spam = lambda l, spam={
+ "-----BEGIN PKCS7-----": True,
+ "-----END PKCS7-----": True,
+ "": True,
}: not spam.get(l.strip())
+
def b64encode(b):
- s = base64.b64encode(b)
- # Line-break at 76 chars.
- items = []
- while s:
- items.append(tostr(s[:76]))
- items.append('\n')
- s = s[76:]
- return strjoin(items)
+ s = base64.b64encode(b)
+ # Line-break at 76 chars.
+ items = []
+ while s:
+ items.append(tostr(s[:76]))
+ items.append("\n")
+ s = s[76:]
+ return strjoin(items)
+
class SignatureRecord(object):
- def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, format=self.ulFormat)
- writer.newline()
- writer.write_noindent("-----BEGIN PKCS7-----\n")
- writer.write_noindent(b64encode(self.pkcs7))
- writer.write_noindent("-----END PKCS7-----\n")
- writer.endtag(self.__class__.__name__)
+ def toXML(self, writer, ttFont):
+ writer.begintag(self.__class__.__name__, format=self.ulFormat)
+ writer.newline()
+ writer.write_noindent("-----BEGIN PKCS7-----\n")
+ writer.write_noindent(b64encode(self.pkcs7))
+ writer.write_noindent("-----END PKCS7-----\n")
+ writer.endtag(self.__class__.__name__)
- def fromXML(self, name, attrs, content, ttFont):
- self.ulFormat = safeEval(attrs["format"])
- self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
- self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
- self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
+ def fromXML(self, name, attrs, content, ttFont):
+ self.ulFormat = safeEval(attrs["format"])
+ self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
+ self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
+ self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
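Illustrative aside (not from the commit): the module-level b64encode helper above wraps the base64 payload at 76 characters per line, which keeps the PKCS7 block readable between the PEM markers in TTX output. A tiny check of that behaviour:

    from fontTools.ttLib.tables.D_S_I_G_ import b64encode

    wrapped = b64encode(b"\x00" * 100)
    assert all(len(line) <= 76 for line in wrapped.splitlines())
    assert wrapped.endswith("\n")         # every 76-char chunk is newline-terminated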
diff --git a/Lib/fontTools/ttLib/tables/D__e_b_g.py b/Lib/fontTools/ttLib/tables/D__e_b_g.py
index ff64a9b5..54449a5f 100644
--- a/Lib/fontTools/ttLib/tables/D__e_b_g.py
+++ b/Lib/fontTools/ttLib/tables/D__e_b_g.py
@@ -11,7 +11,7 @@ class table_D__e_b_g(DefaultTable.DefaultTable):
return json.dumps(self.data).encode("utf-8")
def toXML(self, writer, ttFont):
- writer.writecdata(json.dumps(self.data))
+ writer.writecdata(json.dumps(self.data, indent=2))
def fromXML(self, name, attrs, content, ttFont):
self.data = json.loads(content)
diff --git a/Lib/fontTools/ttLib/tables/DefaultTable.py b/Lib/fontTools/ttLib/tables/DefaultTable.py
index dae83183..92f2aa65 100644
--- a/Lib/fontTools/ttLib/tables/DefaultTable.py
+++ b/Lib/fontTools/ttLib/tables/DefaultTable.py
@@ -1,48 +1,49 @@
from fontTools.misc.textTools import Tag
from fontTools.ttLib import getClassTag
-class DefaultTable(object):
- dependencies = []
-
- def __init__(self, tag=None):
- if tag is None:
- tag = getClassTag(self.__class__)
- self.tableTag = Tag(tag)
-
- def decompile(self, data, ttFont):
- self.data = data
-
- def compile(self, ttFont):
- return self.data
-
- def toXML(self, writer, ttFont, **kwargs):
- if hasattr(self, "ERROR"):
- writer.comment("An error occurred during the decompilation of this table")
- writer.newline()
- writer.comment(self.ERROR)
- writer.newline()
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(self.compile(ttFont))
- writer.endtag("hexdata")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- from fontTools.misc.textTools import readHex
- from fontTools import ttLib
- if name != "hexdata":
- raise ttLib.TTLibError("can't handle '%s' element" % name)
- self.decompile(readHex(content), ttFont)
-
- def __repr__(self):
- return "<'%s' table at %x>" % (self.tableTag, id(self))
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+class DefaultTable(object):
+ dependencies = []
+
+ def __init__(self, tag=None):
+ if tag is None:
+ tag = getClassTag(self.__class__)
+ self.tableTag = Tag(tag)
+
+ def decompile(self, data, ttFont):
+ self.data = data
+
+ def compile(self, ttFont):
+ return self.data
+
+ def toXML(self, writer, ttFont, **kwargs):
+ if hasattr(self, "ERROR"):
+ writer.comment("An error occurred during the decompilation of this table")
+ writer.newline()
+ writer.comment(self.ERROR)
+ writer.newline()
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(self.compile(ttFont))
+ writer.endtag("hexdata")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ from fontTools.misc.textTools import readHex
+ from fontTools import ttLib
+
+ if name != "hexdata":
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
+ self.decompile(readHex(content), ttFont)
+
+ def __repr__(self):
+ return "<'%s' table at %x>" % (self.tableTag, id(self))
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
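Aside, for orientation (not part of the patch): DefaultTable is the base class for all tables and the raw-bytes fallback for any table fontTools has no dedicated parser for; decompile just stores the bytes and compile hands them back. A minimal sketch with a hypothetical tag:

    from fontTools.ttLib.tables.DefaultTable import DefaultTable

    table = DefaultTable(tag="XXXX")                  # hypothetical table tag
    table.decompile(b"\x00\x01\x02\x03", ttFont=None)
    assert table.compile(ttFont=None) == b"\x00\x01\x02\x03"   # round-trips unchanged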
diff --git a/Lib/fontTools/ttLib/tables/E_B_D_T_.py b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
index ae716512..9f7f82ef 100644
--- a/Lib/fontTools/ttLib/tables/E_B_D_T_.py
+++ b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
@@ -1,6 +1,20 @@
from fontTools.misc import sstruct
-from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin, safeEval, readHex, hexStr, deHexStr
-from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
+from fontTools.misc.textTools import (
+ bytechr,
+ byteord,
+ bytesjoin,
+ strjoin,
+ safeEval,
+ readHex,
+ hexStr,
+ deHexStr,
+)
+from .BitmapGlyphMetrics import (
+ BigGlyphMetrics,
+ bigGlyphMetricsFormat,
+ SmallGlyphMetrics,
+ smallGlyphMetricsFormat,
+)
from . import DefaultTable
import itertools
import os
@@ -22,220 +36,232 @@ ebdtComponentFormat = """
yOffset: b
"""
+
class table_E_B_D_T_(DefaultTable.DefaultTable):
+ # Keep a reference to the name of the data locator table.
+ locatorName = "EBLC"
+
+ # This method can be overridden in subclasses to support new formats
+ # without changing the other implementation. Also can be used as a
+    # convenience method for converting a font file to an alternative format.
+ def getImageFormatClass(self, imageFormat):
+ return ebdt_bitmap_classes[imageFormat]
+
+ def decompile(self, data, ttFont):
+ # Get the version but don't advance the slice.
+ # Most of the lookup for this table is done relative
+        # to the beginning so slice by the offsets provided
+ # in the EBLC table.
+ sstruct.unpack2(ebdtTableVersionFormat, data, self)
+
+ # Keep a dict of glyphs that have been seen so they aren't remade.
+ # This dict maps intervals of data to the BitmapGlyph.
+ glyphDict = {}
+
+ # Pull out the EBLC table and loop through glyphs.
+ # A strike is a concept that spans both tables.
+ # The actual bitmap data is stored in the EBDT.
+ locator = ttFont[self.__class__.locatorName]
+ self.strikeData = []
+ for curStrike in locator.strikes:
+ bitmapGlyphDict = {}
+ self.strikeData.append(bitmapGlyphDict)
+ for indexSubTable in curStrike.indexSubTables:
+ dataIter = zip(indexSubTable.names, indexSubTable.locations)
+ for curName, curLoc in dataIter:
+ # Don't create duplicate data entries for the same glyphs.
+ # Instead just use the structures that already exist if they exist.
+ if curLoc in glyphDict:
+ curGlyph = glyphDict[curLoc]
+ else:
+ curGlyphData = data[slice(*curLoc)]
+ imageFormatClass = self.getImageFormatClass(
+ indexSubTable.imageFormat
+ )
+ curGlyph = imageFormatClass(curGlyphData, ttFont)
+ glyphDict[curLoc] = curGlyph
+ bitmapGlyphDict[curName] = curGlyph
+
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
+ dataSize = len(dataList[0])
+
+ # Keep a dict of glyphs that have been seen so they aren't remade.
+ # This dict maps the id of the BitmapGlyph to the interval
+ # in the data.
+ glyphDict = {}
+
+ # Go through the bitmap glyph data. Just in case the data for a glyph
+ # changed the size metrics should be recalculated. There are a variety
+ # of formats and they get stored in the EBLC table. That is why
+        # recalculation is deferred to the EblcIndexSubTable class and just
+ # pass what is known about bitmap glyphs from this particular table.
+ locator = ttFont[self.__class__.locatorName]
+ for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
+ for curIndexSubTable in curStrike.indexSubTables:
+ dataLocations = []
+ for curName in curIndexSubTable.names:
+ # Handle the data placement based on seeing the glyph or not.
+ # Just save a reference to the location if the glyph has already
+ # been saved in compile. This code assumes that glyphs will only
+ # be referenced multiple times from indexFormat5. By luck the
+ # code may still work when referencing poorly ordered fonts with
+ # duplicate references. If there is a font that is unlucky the
+ # respective compile methods for the indexSubTables will fail
+ # their assertions. All fonts seem to follow this assumption.
+ # More complicated packing may be needed if a counter-font exists.
+ glyph = curGlyphDict[curName]
+ objectId = id(glyph)
+ if objectId not in glyphDict:
+ data = glyph.compile(ttFont)
+ data = curIndexSubTable.padBitmapData(data)
+ startByte = dataSize
+ dataSize += len(data)
+ endByte = dataSize
+ dataList.append(data)
+ dataLoc = (startByte, endByte)
+ glyphDict[objectId] = dataLoc
+ else:
+ dataLoc = glyphDict[objectId]
+ dataLocations.append(dataLoc)
+ # Just use the new data locations in the indexSubTable.
+ # The respective compile implementations will take care
+                # of any of the problems in the conversion that may arise.
+ curIndexSubTable.locations = dataLocations
+
+ return bytesjoin(dataList)
+
+ def toXML(self, writer, ttFont):
+ # When exporting to XML if one of the data export formats
+ # requires metrics then those metrics may be in the locator.
+ # In this case populate the bitmaps with "export metrics".
+ if ttFont.bitmapGlyphDataFormat in ("row", "bitwise"):
+ locator = ttFont[self.__class__.locatorName]
+ for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
+ for curIndexSubTable in curStrike.indexSubTables:
+ for curName in curIndexSubTable.names:
+ glyph = curGlyphDict[curName]
+ # It is not clear which metrics should take priority here.
+ # For now, if both exist, go with the glyph's own metrics.
+ if hasattr(glyph, "metrics"):
+ glyph.exportMetrics = glyph.metrics
+ else:
+ glyph.exportMetrics = curIndexSubTable.metrics
+ glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth
+
+ writer.simpletag("header", [("version", self.version)])
+ writer.newline()
+ locator = ttFont[self.__class__.locatorName]
+ for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
+ writer.begintag("strikedata", [("index", strikeIndex)])
+ writer.newline()
+ for curName, curBitmap in bitmapGlyphDict.items():
+ curBitmap.toXML(strikeIndex, curName, writer, ttFont)
+ writer.endtag("strikedata")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "header":
+ self.version = safeEval(attrs["version"])
+ elif name == "strikedata":
+ if not hasattr(self, "strikeData"):
+ self.strikeData = []
+ strikeIndex = safeEval(attrs["index"])
+
+ bitmapGlyphDict = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
+ imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix) :])
+ glyphName = attrs["name"]
+ imageFormatClass = self.getImageFormatClass(imageFormat)
+ curGlyph = imageFormatClass(None, None)
+ curGlyph.fromXML(name, attrs, content, ttFont)
+ assert glyphName not in bitmapGlyphDict, (
+ "Duplicate glyphs with the same name '%s' in the same strike."
+ % glyphName
+ )
+ bitmapGlyphDict[glyphName] = curGlyph
+ else:
+ log.warning("%s being ignored by %s", name, self.__class__.__name__)
+
+ # Grow the strike data array to the appropriate size. The XML
+ # format allows the strike index value to be out of order.
+ if strikeIndex >= len(self.strikeData):
+ self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
+ assert (
+ self.strikeData[strikeIndex] is None
+ ), "Duplicate strike EBDT indices."
+ self.strikeData[strikeIndex] = bitmapGlyphDict
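The bitmapGlyphDataFormat consulted in toXML above is supplied by whatever drives the XML export; as far as I recall it is exposed as the -z option of the ttx command line and as a saveXML keyword, but treat the exact spelling below as an assumption to verify against the ttx documentation rather than as confirmed API:

    # Hedged sketch: requesting the "bitwise" export form when dumping the
    # bitmap tables to XML. bitmap_font.ttf is a hypothetical input file and
    # the bitmapGlyphDataFormat keyword is an assumption (expected choices:
    # "raw", "row", "bitwise", "extfile").
    from fontTools.ttLib import TTFont

    font = TTFont("bitmap_font.ttf")
    font.saveXML(
        "bitmap_font.ttx",
        tables=["EBLC", "EBDT"],
        bitmapGlyphDataFormat="bitwise",
    )
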
- # Keep a reference to the name of the data locator table.
- locatorName = 'EBLC'
-
- # This method can be overridden in subclasses to support new formats
- # without changing the other implementation. Also can be used as a
- # convenience method for coverting a font file to an alternative format.
- def getImageFormatClass(self, imageFormat):
- return ebdt_bitmap_classes[imageFormat]
-
- def decompile(self, data, ttFont):
- # Get the version but don't advance the slice.
- # Most of the lookup for this table is done relative
- # to the begining so slice by the offsets provided
- # in the EBLC table.
- sstruct.unpack2(ebdtTableVersionFormat, data, self)
-
- # Keep a dict of glyphs that have been seen so they aren't remade.
- # This dict maps intervals of data to the BitmapGlyph.
- glyphDict = {}
-
- # Pull out the EBLC table and loop through glyphs.
- # A strike is a concept that spans both tables.
- # The actual bitmap data is stored in the EBDT.
- locator = ttFont[self.__class__.locatorName]
- self.strikeData = []
- for curStrike in locator.strikes:
- bitmapGlyphDict = {}
- self.strikeData.append(bitmapGlyphDict)
- for indexSubTable in curStrike.indexSubTables:
- dataIter = zip(indexSubTable.names, indexSubTable.locations)
- for curName, curLoc in dataIter:
- # Don't create duplicate data entries for the same glyphs.
- # Instead just use the structures that already exist if they exist.
- if curLoc in glyphDict:
- curGlyph = glyphDict[curLoc]
- else:
- curGlyphData = data[slice(*curLoc)]
- imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat)
- curGlyph = imageFormatClass(curGlyphData, ttFont)
- glyphDict[curLoc] = curGlyph
- bitmapGlyphDict[curName] = curGlyph
-
- def compile(self, ttFont):
-
- dataList = []
- dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
- dataSize = len(dataList[0])
-
- # Keep a dict of glyphs that have been seen so they aren't remade.
- # This dict maps the id of the BitmapGlyph to the interval
- # in the data.
- glyphDict = {}
-
- # Go through the bitmap glyph data. Just in case the data for a glyph
- # changed the size metrics should be recalculated. There are a variety
- # of formats and they get stored in the EBLC table. That is why
- # recalculation is defered to the EblcIndexSubTable class and just
- # pass what is known about bitmap glyphs from this particular table.
- locator = ttFont[self.__class__.locatorName]
- for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
- for curIndexSubTable in curStrike.indexSubTables:
- dataLocations = []
- for curName in curIndexSubTable.names:
- # Handle the data placement based on seeing the glyph or not.
- # Just save a reference to the location if the glyph has already
- # been saved in compile. This code assumes that glyphs will only
- # be referenced multiple times from indexFormat5. By luck the
- # code may still work when referencing poorly ordered fonts with
- # duplicate references. If there is a font that is unlucky the
- # respective compile methods for the indexSubTables will fail
- # their assertions. All fonts seem to follow this assumption.
- # More complicated packing may be needed if a counter-font exists.
- glyph = curGlyphDict[curName]
- objectId = id(glyph)
- if objectId not in glyphDict:
- data = glyph.compile(ttFont)
- data = curIndexSubTable.padBitmapData(data)
- startByte = dataSize
- dataSize += len(data)
- endByte = dataSize
- dataList.append(data)
- dataLoc = (startByte, endByte)
- glyphDict[objectId] = dataLoc
- else:
- dataLoc = glyphDict[objectId]
- dataLocations.append(dataLoc)
- # Just use the new data locations in the indexSubTable.
- # The respective compile implementations will take care
- # of any of the problems in the convertion that may arise.
- curIndexSubTable.locations = dataLocations
-
- return bytesjoin(dataList)
-
- def toXML(self, writer, ttFont):
- # When exporting to XML if one of the data export formats
- # requires metrics then those metrics may be in the locator.
- # In this case populate the bitmaps with "export metrics".
- if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'):
- locator = ttFont[self.__class__.locatorName]
- for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
- for curIndexSubTable in curStrike.indexSubTables:
- for curName in curIndexSubTable.names:
- glyph = curGlyphDict[curName]
- # I'm not sure which metrics have priority here.
- # For now if both metrics exist go with glyph metrics.
- if hasattr(glyph, 'metrics'):
- glyph.exportMetrics = glyph.metrics
- else:
- glyph.exportMetrics = curIndexSubTable.metrics
- glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth
-
- writer.simpletag("header", [('version', self.version)])
- writer.newline()
- locator = ttFont[self.__class__.locatorName]
- for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
- writer.begintag('strikedata', [('index', strikeIndex)])
- writer.newline()
- for curName, curBitmap in bitmapGlyphDict.items():
- curBitmap.toXML(strikeIndex, curName, writer, ttFont)
- writer.endtag('strikedata')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == 'header':
- self.version = safeEval(attrs['version'])
- elif name == 'strikedata':
- if not hasattr(self, 'strikeData'):
- self.strikeData = []
- strikeIndex = safeEval(attrs['index'])
-
- bitmapGlyphDict = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
- imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix):])
- glyphName = attrs['name']
- imageFormatClass = self.getImageFormatClass(imageFormat)
- curGlyph = imageFormatClass(None, None)
- curGlyph.fromXML(name, attrs, content, ttFont)
- assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName
- bitmapGlyphDict[glyphName] = curGlyph
- else:
- log.warning("%s being ignored by %s", name, self.__class__.__name__)
-
- # Grow the strike data array to the appropriate size. The XML
- # format allows the strike index value to be out of order.
- if strikeIndex >= len(self.strikeData):
- self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
- assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices."
- self.strikeData[strikeIndex] = bitmapGlyphDict
class EbdtComponent(object):
+ def toXML(self, writer, ttFont):
+ writer.begintag("ebdtComponent", [("name", self.name)])
+ writer.newline()
+ for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
+ writer.simpletag(componentName, value=getattr(self, componentName))
+ writer.newline()
+ writer.endtag("ebdtComponent")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.name = attrs["name"]
+ componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name in componentNames:
+ vars(self)[name] = safeEval(attrs["value"])
+ else:
+ log.warning("unknown name '%s' being ignored by EbdtComponent.", name)
- def toXML(self, writer, ttFont):
- writer.begintag('ebdtComponent', [('name', self.name)])
- writer.newline()
- for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
- writer.simpletag(componentName, value=getattr(self, componentName))
- writer.newline()
- writer.endtag('ebdtComponent')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.name = attrs['name']
- componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name in componentNames:
- vars(self)[name] = safeEval(attrs['value'])
- else:
- log.warning("unknown name '%s' being ignored by EbdtComponent.", name)
# Helper functions for dealing with binary.
+
def _data2binary(data, numBits):
- binaryList = []
- for curByte in data:
- value = byteord(curByte)
- numBitsCut = min(8, numBits)
- for i in range(numBitsCut):
- if value & 0x1:
- binaryList.append('1')
- else:
- binaryList.append('0')
- value = value >> 1
- numBits -= numBitsCut
- return strjoin(binaryList)
+ binaryList = []
+ for curByte in data:
+ value = byteord(curByte)
+ numBitsCut = min(8, numBits)
+ for i in range(numBitsCut):
+ if value & 0x1:
+ binaryList.append("1")
+ else:
+ binaryList.append("0")
+ value = value >> 1
+ numBits -= numBitsCut
+ return strjoin(binaryList)
+
def _binary2data(binary):
- byteList = []
- for bitLoc in range(0, len(binary), 8):
- byteString = binary[bitLoc:bitLoc+8]
- curByte = 0
- for curBit in reversed(byteString):
- curByte = curByte << 1
- if curBit == '1':
- curByte |= 1
- byteList.append(bytechr(curByte))
- return bytesjoin(byteList)
+ byteList = []
+ for bitLoc in range(0, len(binary), 8):
+ byteString = binary[bitLoc : bitLoc + 8]
+ curByte = 0
+ for curBit in reversed(byteString):
+ curByte = curByte << 1
+ if curBit == "1":
+ curByte |= 1
+ byteList.append(bytechr(curByte))
+ return bytesjoin(byteList)
+
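_data2binary expands each byte into ASCII '0'/'1' characters, least significant bit first, trimming the result to numBits, and _binary2data packs such a string back into bytes. A worked example, checked by hand against the two implementations (the import is only to make the snippet self-contained; these are private helpers):

    from fontTools.ttLib.tables.E_B_D_T_ import _binary2data, _data2binary

    # 0x01 expands LSB-first to "10000000", 0x80 to "00000001".
    assert _data2binary(b"\x01\x80", 16) == "1000000000000001"
    assert _data2binary(b"\x01\x80", 10) == "1000000000"   # numBits trims the tail
    assert _binary2data("1000000000000001") == b"\x01\x80"
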
def _memoize(f):
- class memodict(dict):
- def __missing__(self, key):
- ret = f(key)
- if len(key) == 1:
- self[key] = ret
- return ret
- return memodict().__getitem__
+ class memodict(dict):
+ def __missing__(self, key):
+ ret = f(key)
+ if isinstance(key, int) or len(key) == 1:
+ self[key] = ret
+ return ret
+
+ return memodict().__getitem__
+
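_memoize caches a result only for single-element keys, or for ints after the change above (iterating a bytes object yields ints on Python 3), so the cache stays bounded at 256 entries when keyed by byte values. The same pattern in isolation, applied to a toy function:

    # Standalone illustration of the _memoize pattern defined above.
    def _memoize(f):
        class memodict(dict):
            def __missing__(self, key):
                ret = f(key)
                if isinstance(key, int) or len(key) == 1:
                    self[key] = ret
                return ret

        return memodict().__getitem__

    calls = []

    @_memoize
    def negate(key):
        calls.append(key)
        return ~key & 0xFF

    assert negate(0x0F) == 0xF0
    assert negate(0x0F) == 0xF0   # second call is served from the cache
    assert calls == [0x0F]        # the wrapped function ran only once
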
# 00100111 -> 11100100 per byte, not to be confused with little/big endian.
# Bitmap data per byte is in the order that binary is written on the page
@@ -243,524 +269,559 @@ def _memoize(f):
# opposite of what makes sense algorithmically and hence this function.
@_memoize
def _reverseBytes(data):
- if len(data) != 1:
- return bytesjoin(map(_reverseBytes, data))
- byte = byteord(data)
- result = 0
- for i in range(8):
- result = result << 1
- result |= byte & 1
- byte = byte >> 1
- return bytechr(result)
+ r"""
+ >>> bin(ord(_reverseBytes(0b00100111)))
+ '0b11100100'
+ >>> _reverseBytes(b'\x00\xf0')
+ b'\x00\x0f'
+ """
+ if isinstance(data, bytes) and len(data) != 1:
+ return bytesjoin(map(_reverseBytes, data))
+ byte = byteord(data)
+ result = 0
+ for i in range(8):
+ result = result << 1
+ result |= byte & 1
+ byte = byte >> 1
+ return bytechr(result)
+
# This section of code is for reading and writing image data to/from XML.
+
def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- writer.begintag('rawimagedata')
- writer.newline()
- writer.dumphex(bitmapObject.imageData)
- writer.endtag('rawimagedata')
- writer.newline()
+ writer.begintag("rawimagedata")
+ writer.newline()
+ writer.dumphex(bitmapObject.imageData)
+ writer.endtag("rawimagedata")
+ writer.newline()
+
def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
- bitmapObject.imageData = readHex(content)
+ bitmapObject.imageData = readHex(content)
+
def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- metrics = bitmapObject.exportMetrics
- del bitmapObject.exportMetrics
- bitDepth = bitmapObject.exportBitDepth
- del bitmapObject.exportBitDepth
-
- writer.begintag('rowimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height)
- writer.newline()
- for curRow in range(metrics.height):
- rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
- writer.simpletag('row', value=hexStr(rowData))
- writer.newline()
- writer.endtag('rowimagedata')
- writer.newline()
+ metrics = bitmapObject.exportMetrics
+ del bitmapObject.exportMetrics
+ bitDepth = bitmapObject.exportBitDepth
+ del bitmapObject.exportBitDepth
+
+ writer.begintag(
+ "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height
+ )
+ writer.newline()
+ for curRow in range(metrics.height):
+ rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
+ writer.simpletag("row", value=hexStr(rowData))
+ writer.newline()
+ writer.endtag("rowimagedata")
+ writer.newline()
+
def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
- bitDepth = safeEval(attrs['bitDepth'])
- metrics = SmallGlyphMetrics()
- metrics.width = safeEval(attrs['width'])
- metrics.height = safeEval(attrs['height'])
-
- dataRows = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- # Chop off 'imagedata' from the tag to get just the option.
- if name == 'row':
- dataRows.append(deHexStr(attr['value']))
- bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
+ bitDepth = safeEval(attrs["bitDepth"])
+ metrics = SmallGlyphMetrics()
+ metrics.width = safeEval(attrs["width"])
+ metrics.height = safeEval(attrs["height"])
+
+ dataRows = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ # Collect the hex-encoded data for each row.
+ if name == "row":
+ dataRows.append(deHexStr(attr["value"]))
+ bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
+
def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- metrics = bitmapObject.exportMetrics
- del bitmapObject.exportMetrics
- bitDepth = bitmapObject.exportBitDepth
- del bitmapObject.exportBitDepth
-
- # A dict for mapping binary to more readable/artistic ASCII characters.
- binaryConv = {'0':'.', '1':'@'}
-
- writer.begintag('bitwiseimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height)
- writer.newline()
- for curRow in range(metrics.height):
- rowData = bitmapObject.getRow(curRow, bitDepth=1, metrics=metrics, reverseBytes=True)
- rowData = _data2binary(rowData, metrics.width)
- # Make the output a readable ASCII art form.
- rowData = strjoin(map(binaryConv.get, rowData))
- writer.simpletag('row', value=rowData)
- writer.newline()
- writer.endtag('bitwiseimagedata')
- writer.newline()
+ metrics = bitmapObject.exportMetrics
+ del bitmapObject.exportMetrics
+ bitDepth = bitmapObject.exportBitDepth
+ del bitmapObject.exportBitDepth
+
+ # A dict for mapping binary to more readable/artistic ASCII characters.
+ binaryConv = {"0": ".", "1": "@"}
+
+ writer.begintag(
+ "bitwiseimagedata",
+ bitDepth=bitDepth,
+ width=metrics.width,
+ height=metrics.height,
+ )
+ writer.newline()
+ for curRow in range(metrics.height):
+ rowData = bitmapObject.getRow(
+ curRow, bitDepth=1, metrics=metrics, reverseBytes=True
+ )
+ rowData = _data2binary(rowData, metrics.width)
+ # Make the output a readable ASCII art form.
+ rowData = strjoin(map(binaryConv.get, rowData))
+ writer.simpletag("row", value=rowData)
+ writer.newline()
+ writer.endtag("bitwiseimagedata")
+ writer.newline()
+
def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
- bitDepth = safeEval(attrs['bitDepth'])
- metrics = SmallGlyphMetrics()
- metrics.width = safeEval(attrs['width'])
- metrics.height = safeEval(attrs['height'])
-
- # A dict for mapping from ASCII to binary. All characters are considered
- # a '1' except space, period and '0' which maps to '0'.
- binaryConv = {' ':'0', '.':'0', '0':'0'}
-
- dataRows = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- if name == 'row':
- mapParams = zip(attr['value'], itertools.repeat('1'))
- rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
- dataRows.append(_binary2data(rowData))
-
- bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True)
+ bitDepth = safeEval(attrs["bitDepth"])
+ metrics = SmallGlyphMetrics()
+ metrics.width = safeEval(attrs["width"])
+ metrics.height = safeEval(attrs["height"])
+
+ # A dict for mapping from ASCII to binary. Every character is considered
+ # a '1' except space, period and '0', which map to '0'.
+ binaryConv = {" ": "0", ".": "0", "0": "0"}
+
+ dataRows = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ if name == "row":
+ mapParams = zip(attr["value"], itertools.repeat("1"))
+ rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
+ dataRows.append(_binary2data(rowData))
+
+ bitmapObject.setRows(
+ dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True
+ )
+
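The "bitwise" form writes each row as ASCII art, '.' for a cleared bit and '@' for a set bit, and the reader treats any character other than space, period or '0' as a set bit. A small round-trip sketch of that convention using plain strings:

    import itertools

    to_art = {"0": ".", "1": "@"}
    to_bit = {" ": "0", ".": "0", "0": "0"}

    bits = "01100001"
    art = "".join(to_art[b] for b in bits)
    assert art == ".@@....@"

    # Reading back: unknown characters default to '1', mirroring the
    # binaryConv.get / itertools.starmap idiom used above.
    restored = "".join(itertools.starmap(to_bit.get, zip(art, itertools.repeat("1"))))
    assert restored == bits
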
def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- try:
- folder = os.path.dirname(writer.file.name)
- except AttributeError:
- # fall back to current directory if output file's directory isn't found
- folder = '.'
- folder = os.path.join(folder, 'bitmaps')
- filename = glyphName + bitmapObject.fileExtension
- if not os.path.isdir(folder):
- os.makedirs(folder)
- folder = os.path.join(folder, 'strike%d' % strikeIndex)
- if not os.path.isdir(folder):
- os.makedirs(folder)
-
- fullPath = os.path.join(folder, filename)
- writer.simpletag('extfileimagedata', value=fullPath)
- writer.newline()
-
- with open(fullPath, "wb") as file:
- file.write(bitmapObject.imageData)
+ try:
+ folder = os.path.dirname(writer.file.name)
+ except AttributeError:
+ # fall back to current directory if output file's directory isn't found
+ folder = "."
+ folder = os.path.join(folder, "bitmaps")
+ filename = glyphName + bitmapObject.fileExtension
+ if not os.path.isdir(folder):
+ os.makedirs(folder)
+ folder = os.path.join(folder, "strike%d" % strikeIndex)
+ if not os.path.isdir(folder):
+ os.makedirs(folder)
+
+ fullPath = os.path.join(folder, filename)
+ writer.simpletag("extfileimagedata", value=fullPath)
+ writer.newline()
+
+ with open(fullPath, "wb") as file:
+ file.write(bitmapObject.imageData)
+
def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
- fullPath = attrs['value']
- with open(fullPath, "rb") as file:
- bitmapObject.imageData = file.read()
+ fullPath = attrs["value"]
+ with open(fullPath, "rb") as file:
+ bitmapObject.imageData = file.read()
+
# End of XML writing code.
# Important information about the naming scheme. Used for identifying formats
# in XML.
-_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_'
+_bitmapGlyphSubclassPrefix = "ebdt_bitmap_format_"
-class BitmapGlyph(object):
- # For the external file format. This can be changed in subclasses. This way
- # when the extfile option is turned on files have the form: glyphName.ext
- # The default is just a flat binary file with no meaning.
- fileExtension = '.bin'
-
- # Keep track of reading and writing of various forms.
- xmlDataFunctions = {
- 'raw': (_writeRawImageData, _readRawImageData),
- 'row': (_writeRowImageData, _readRowImageData),
- 'bitwise': (_writeBitwiseImageData, _readBitwiseImageData),
- 'extfile': (_writeExtFileImageData, _readExtFileImageData),
- }
-
- def __init__(self, data, ttFont):
- self.data = data
- self.ttFont = ttFont
- # TODO Currently non-lazy decompilation is untested here...
- #if not ttFont.lazy:
- # self.decompile()
- # del self.data
-
- def __getattr__(self, attr):
- # Allow lazy decompile.
- if attr[:2] == '__':
- raise AttributeError(attr)
- if attr == "data":
- raise AttributeError(attr)
- self.decompile()
- del self.data
- return getattr(self, attr)
-
- def ensureDecompiled(self, recurse=False):
- if hasattr(self, "data"):
- self.decompile()
- del self.data
-
- # Not a fan of this but it is needed for safer safety checking.
- def getFormat(self):
- return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):])
-
- def toXML(self, strikeIndex, glyphName, writer, ttFont):
- writer.begintag(self.__class__.__name__, [('name', glyphName)])
- writer.newline()
-
- self.writeMetrics(writer, ttFont)
- # Use the internal write method to write using the correct output format.
- self.writeData(strikeIndex, glyphName, writer, ttFont)
-
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.readMetrics(name, attrs, content, ttFont)
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- if not name.endswith('imagedata'):
- continue
- # Chop off 'imagedata' from the tag to get just the option.
- option = name[:-len('imagedata')]
- assert option in self.__class__.xmlDataFunctions
- self.readData(name, attr, content, ttFont)
-
- # Some of the glyphs have the metrics. This allows for metrics to be
- # added if the glyph format has them. Default behavior is to do nothing.
- def writeMetrics(self, writer, ttFont):
- pass
-
- # The opposite of write metrics.
- def readMetrics(self, name, attrs, content, ttFont):
- pass
-
- def writeData(self, strikeIndex, glyphName, writer, ttFont):
- try:
- writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat]
- except KeyError:
- writeFunc = _writeRawImageData
- writeFunc(strikeIndex, glyphName, self, writer, ttFont)
-
- def readData(self, name, attrs, content, ttFont):
- # Chop off 'imagedata' from the tag to get just the option.
- option = name[:-len('imagedata')]
- writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
- readFunc(self, name, attrs, content, ttFont)
+class BitmapGlyph(object):
+ # Extension used for the external file format. This can be changed in
+ # subclasses so that, when the extfile option is turned on, files have
+ # the form glyphName.ext. The default is a flat binary file with no meaning.
+ fileExtension = ".bin"
+
+ # Keep track of reading and writing of various forms.
+ xmlDataFunctions = {
+ "raw": (_writeRawImageData, _readRawImageData),
+ "row": (_writeRowImageData, _readRowImageData),
+ "bitwise": (_writeBitwiseImageData, _readBitwiseImageData),
+ "extfile": (_writeExtFileImageData, _readExtFileImageData),
+ }
+
+ def __init__(self, data, ttFont):
+ self.data = data
+ self.ttFont = ttFont
+ # TODO Currently non-lazy decompilation is untested here...
+ # if not ttFont.lazy:
+ # self.decompile()
+ # del self.data
+
+ def __getattr__(self, attr):
+ # Allow lazy decompile.
+ if attr[:2] == "__":
+ raise AttributeError(attr)
+ if attr == "data":
+ raise AttributeError(attr)
+ self.decompile()
+ del self.data
+ return getattr(self, attr)
+
+ def ensureDecompiled(self, recurse=False):
+ if hasattr(self, "data"):
+ self.decompile()
+ del self.data
+
+ # Not a fan of this, but it is needed for stricter safety checking.
+ def getFormat(self):
+ return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix) :])
+
+ def toXML(self, strikeIndex, glyphName, writer, ttFont):
+ writer.begintag(self.__class__.__name__, [("name", glyphName)])
+ writer.newline()
+
+ self.writeMetrics(writer, ttFont)
+ # Use the internal write method to write using the correct output format.
+ self.writeData(strikeIndex, glyphName, writer, ttFont)
+
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.readMetrics(name, attrs, content, ttFont)
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ if not name.endswith("imagedata"):
+ continue
+ # Chop off 'imagedata' from the tag to get just the option.
+ option = name[: -len("imagedata")]
+ assert option in self.__class__.xmlDataFunctions
+ self.readData(name, attr, content, ttFont)
+
+ # Some glyph formats carry metrics. These hooks allow metrics to be
+ # handled if the glyph format has them. The default behavior is to do nothing.
+ def writeMetrics(self, writer, ttFont):
+ pass
+
+ # The opposite of write metrics.
+ def readMetrics(self, name, attrs, content, ttFont):
+ pass
+
+ def writeData(self, strikeIndex, glyphName, writer, ttFont):
+ try:
+ writeFunc, readFunc = self.__class__.xmlDataFunctions[
+ ttFont.bitmapGlyphDataFormat
+ ]
+ except KeyError:
+ writeFunc = _writeRawImageData
+ writeFunc(strikeIndex, glyphName, self, writer, ttFont)
+
+ def readData(self, name, attrs, content, ttFont):
+ # Chop off 'imagedata' from the tag to get just the option.
+ option = name[: -len("imagedata")]
+ writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
+ readFunc(self, name, attrs, content, ttFont)
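BitmapGlyph keeps the raw bytes and only decompiles when some other attribute is first requested, via the __getattr__ hook above. The pattern in isolation (LazyBlob is an illustrative class, not part of the module):

    class LazyBlob(object):
        def __init__(self, data):
            self.data = data

        def __getattr__(self, attr):
            # Only reached when normal lookup fails; guard dunder names and
            # a missing 'data' so the fallback cannot recurse forever.
            if attr[:2] == "__" or attr == "data":
                raise AttributeError(attr)
            self.decompile()
            del self.data
            return getattr(self, attr)

        def decompile(self):
            self.length = len(self.data)

    blob = LazyBlob(b"\x01\x02\x03")
    assert blob.length == 3           # first access triggers decompile()
    assert not hasattr(blob, "data")  # the raw data is discarded afterwards
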
# A closure for creating a mixin for the two types of metrics handling.
# Most of the code is very similar so it's easier to deal with here.
# Everything works just by passing the class that the mixin is for.
def _createBitmapPlusMetricsMixin(metricsClass):
- # Both metrics names are listed here to make meaningful error messages.
- metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
- curMetricsName = metricsClass.__name__
- # Find which metrics this is for and determine the opposite name.
- metricsId = metricStrings.index(curMetricsName)
- oppositeMetricsName = metricStrings[1-metricsId]
-
- class BitmapPlusMetricsMixin(object):
-
- def writeMetrics(self, writer, ttFont):
- self.metrics.toXML(writer, ttFont)
-
- def readMetrics(self, name, attrs, content, ttFont):
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == curMetricsName:
- self.metrics = metricsClass()
- self.metrics.fromXML(name, attrs, content, ttFont)
- elif name == oppositeMetricsName:
- log.warning("Warning: %s being ignored in format %d.", oppositeMetricsName, self.getFormat())
-
- return BitmapPlusMetricsMixin
+ # Both metrics names are listed here to make meaningful error messages.
+ metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
+ curMetricsName = metricsClass.__name__
+ # Find which metrics this is for and determine the opposite name.
+ metricsId = metricStrings.index(curMetricsName)
+ oppositeMetricsName = metricStrings[1 - metricsId]
+
+ class BitmapPlusMetricsMixin(object):
+ def writeMetrics(self, writer, ttFont):
+ self.metrics.toXML(writer, ttFont)
+
+ def readMetrics(self, name, attrs, content, ttFont):
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == curMetricsName:
+ self.metrics = metricsClass()
+ self.metrics.fromXML(name, attrs, content, ttFont)
+ elif name == oppositeMetricsName:
+ log.warning(
+ "Warning: %s being ignored in format %d.",
+ oppositeMetricsName,
+ self.getFormat(),
+ )
+
+ return BitmapPlusMetricsMixin
+
# Since there are only two types of mixin's just create them here.
BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)
+
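_createBitmapPlusMetricsMixin is a small class factory: the metrics class passed in, and the name of the opposite one used for the warning, are baked into the generated mixin through the closure. The shape of the trick reduced to its essentials, with made-up names:

    def make_tagged_mixin(tag):
        class TaggedMixin(object):
            def describe(self):
                return "%s(%s)" % (self.__class__.__name__, tag)

        return TaggedMixin

    BigTagged = make_tagged_mixin("big")
    SmallTagged = make_tagged_mixin("small")

    class Glyph(BigTagged):
        pass

    assert Glyph().describe() == "Glyph(big)"
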
# Data that is bit aligned can be tricky to deal with. These classes implement
# helper functionality for dealing with the data and getting a particular row
# of bitwise data. Also helps implement fancy data export/import in XML.
class BitAlignedBitmapMixin(object):
+ def _getBitRange(self, row, bitDepth, metrics):
+ rowBits = bitDepth * metrics.width
+ bitOffset = row * rowBits
+ return (bitOffset, bitOffset + rowBits)
+
+ def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
+
+ # Loop through each byte. This can cover two bytes in the original data or
+ # a single byte if things happen to be aligned. The very last entry might
+ # not be aligned so take care to trim the binary data to size and pad with
+ # zeros in the row data. Bit aligned data is somewhat tricky.
+ #
+ # Example of data cut. Data cut represented in x's.
+ # '|' represents byte boundary.
+ # data = ...0XX|XXXXXX00|000... => XXXXXXXX
+ # or
+ # data = ...0XX|XXXX0000|000... => XXXXXX00
+ # or
+ # data = ...000|XXXXXXXX|000... => XXXXXXXX
+ # or
+ # data = ...000|00XXXX00|000... => XXXX0000
+ #
+ dataList = []
+ bitRange = self._getBitRange(row, bitDepth, metrics)
+ stepRange = bitRange + (8,)
+ for curBit in range(*stepRange):
+ endBit = min(curBit + 8, bitRange[1])
+ numBits = endBit - curBit
+ cutPoint = curBit % 8
+ firstByteLoc = curBit // 8
+ secondByteLoc = endBit // 8
+ if firstByteLoc < secondByteLoc:
+ numBitsCut = 8 - cutPoint
+ else:
+ numBitsCut = endBit - curBit
+ curByte = _reverseBytes(self.imageData[firstByteLoc])
+ firstHalf = byteord(curByte) >> cutPoint
+ firstHalf = ((1 << numBitsCut) - 1) & firstHalf
+ newByte = firstHalf
+ if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
+ curByte = _reverseBytes(self.imageData[secondByteLoc])
+ secondHalf = byteord(curByte) << numBitsCut
+ newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
+ dataList.append(bytechr(newByte))
+
+ # The way the data is kept is opposite the algorithm used.
+ data = bytesjoin(dataList)
+ if not reverseBytes:
+ data = _reverseBytes(data)
+ return data
+
+ def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ if not reverseBytes:
+ dataRows = list(map(_reverseBytes, dataRows))
+
+ # Keep track of a list of ordinal values as they are easier to modify
+ # than a list of strings. Map to actual strings later.
+ numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
+ ordDataList = [0] * numBytes
+ for row, data in enumerate(dataRows):
+ bitRange = self._getBitRange(row, bitDepth, metrics)
+ stepRange = bitRange + (8,)
+ for curBit, curByte in zip(range(*stepRange), data):
+ endBit = min(curBit + 8, bitRange[1])
+ cutPoint = curBit % 8
+ firstByteLoc = curBit // 8
+ secondByteLoc = endBit // 8
+ if firstByteLoc < secondByteLoc:
+ numBitsCut = 8 - cutPoint
+ else:
+ numBitsCut = endBit - curBit
+ curByte = byteord(curByte)
+ firstByte = curByte & ((1 << numBitsCut) - 1)
+ ordDataList[firstByteLoc] |= firstByte << cutPoint
+ if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
+ secondByte = (curByte >> numBitsCut) & ((1 << 8 - numBitsCut) - 1)
+ ordDataList[secondByteLoc] |= secondByte
+
+ # Save the image data with the bits going the correct way.
+ self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
- def _getBitRange(self, row, bitDepth, metrics):
- rowBits = (bitDepth * metrics.width)
- bitOffset = row * rowBits
- return (bitOffset, bitOffset+rowBits)
-
- def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
-
- # Loop through each byte. This can cover two bytes in the original data or
- # a single byte if things happen to be aligned. The very last entry might
- # not be aligned so take care to trim the binary data to size and pad with
- # zeros in the row data. Bit aligned data is somewhat tricky.
- #
- # Example of data cut. Data cut represented in x's.
- # '|' represents byte boundary.
- # data = ...0XX|XXXXXX00|000... => XXXXXXXX
- # or
- # data = ...0XX|XXXX0000|000... => XXXXXX00
- # or
- # data = ...000|XXXXXXXX|000... => XXXXXXXX
- # or
- # data = ...000|00XXXX00|000... => XXXX0000
- #
- dataList = []
- bitRange = self._getBitRange(row, bitDepth, metrics)
- stepRange = bitRange + (8,)
- for curBit in range(*stepRange):
- endBit = min(curBit+8, bitRange[1])
- numBits = endBit - curBit
- cutPoint = curBit % 8
- firstByteLoc = curBit // 8
- secondByteLoc = endBit // 8
- if firstByteLoc < secondByteLoc:
- numBitsCut = 8 - cutPoint
- else:
- numBitsCut = endBit - curBit
- curByte = _reverseBytes(self.imageData[firstByteLoc])
- firstHalf = byteord(curByte) >> cutPoint
- firstHalf = ((1<<numBitsCut)-1) & firstHalf
- newByte = firstHalf
- if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
- curByte = _reverseBytes(self.imageData[secondByteLoc])
- secondHalf = byteord(curByte) << numBitsCut
- newByte = (firstHalf | secondHalf) & ((1<<numBits)-1)
- dataList.append(bytechr(newByte))
-
- # The way the data is kept is opposite the algorithm used.
- data = bytesjoin(dataList)
- if not reverseBytes:
- data = _reverseBytes(data)
- return data
-
- def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- if not reverseBytes:
- dataRows = list(map(_reverseBytes, dataRows))
-
- # Keep track of a list of ordinal values as they are easier to modify
- # than a list of strings. Map to actual strings later.
- numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
- ordDataList = [0] * numBytes
- for row, data in enumerate(dataRows):
- bitRange = self._getBitRange(row, bitDepth, metrics)
- stepRange = bitRange + (8,)
- for curBit, curByte in zip(range(*stepRange), data):
- endBit = min(curBit+8, bitRange[1])
- cutPoint = curBit % 8
- firstByteLoc = curBit // 8
- secondByteLoc = endBit // 8
- if firstByteLoc < secondByteLoc:
- numBitsCut = 8 - cutPoint
- else:
- numBitsCut = endBit - curBit
- curByte = byteord(curByte)
- firstByte = curByte & ((1<<numBitsCut)-1)
- ordDataList[firstByteLoc] |= (firstByte << cutPoint)
- if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
- secondByte = (curByte >> numBitsCut) & ((1<<8-numBitsCut)-1)
- ordDataList[secondByteLoc] |= secondByte
-
- # Save the image data with the bits going the correct way.
- self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
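For bit-aligned storage, rows begin on arbitrary bit boundaries: _getBitRange simply multiplies bitDepth by width per row, which is what forces the byte-splicing above. A worked example of the arithmetic, independent of the mixin:

    # Bit-aligned rows: with bitDepth=1 and width=10 a row occupies 10 bits,
    # so consecutive rows do not start on byte boundaries.
    def bit_range(row, bitDepth, width):
        rowBits = bitDepth * width
        bitOffset = row * rowBits
        return (bitOffset, bitOffset + rowBits)

    assert bit_range(0, 1, 10) == (0, 10)    # row 0 spills into the second byte
    assert bit_range(1, 1, 10) == (10, 20)   # row 1 starts mid-byte
    assert bit_range(2, 1, 10) == (20, 30)

    # Total storage for a 3-row bitmap, rounded up to whole bytes,
    # mirrors the numBytes computation in setRows.
    numBytes = (bit_range(3, 1, 10)[0] + 7) // 8
    assert numBytes == 4                     # 30 bits -> 4 bytes
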
class ByteAlignedBitmapMixin(object):
-
- def _getByteRange(self, row, bitDepth, metrics):
- rowBytes = (bitDepth * metrics.width + 7) // 8
- byteOffset = row * rowBytes
- return (byteOffset, byteOffset+rowBytes)
-
- def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
- byteRange = self._getByteRange(row, bitDepth, metrics)
- data = self.imageData[slice(*byteRange)]
- if reverseBytes:
- data = _reverseBytes(data)
- return data
-
- def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- if reverseBytes:
- dataRows = map(_reverseBytes, dataRows)
- self.imageData = bytesjoin(dataRows)
-
-class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
-
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
-
- def compile(self, ttFont):
- data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
- return data + self.imageData
-
-
-class ebdt_bitmap_format_2(BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
-
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
-
- def compile(self, ttFont):
- data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
- return data + self.imageData
+ def _getByteRange(self, row, bitDepth, metrics):
+ rowBytes = (bitDepth * metrics.width + 7) // 8
+ byteOffset = row * rowBytes
+ return (byteOffset, byteOffset + rowBytes)
+
+ def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
+ byteRange = self._getByteRange(row, bitDepth, metrics)
+ data = self.imageData[slice(*byteRange)]
+ if reverseBytes:
+ data = _reverseBytes(data)
+ return data
+
+ def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ if reverseBytes:
+ dataRows = map(_reverseBytes, dataRows)
+ self.imageData = bytesjoin(dataRows)
+
+
+class ebdt_bitmap_format_1(
+ ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
+
+ def compile(self, ttFont):
+ data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
+
+
+class ebdt_bitmap_format_2(
+ BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
+
+ def compile(self, ttFont):
+ data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
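Byte-aligned storage, by contrast, pads every row up to a whole number of bytes, so each row starts on a byte boundary and getRow reduces to a plain slice. The corresponding arithmetic:

    # Byte-aligned rows: each row is padded to whole bytes.
    def byte_range(row, bitDepth, width):
        rowBytes = (bitDepth * width + 7) // 8
        byteOffset = row * rowBytes
        return (byteOffset, byteOffset + rowBytes)

    assert byte_range(0, 1, 10) == (0, 2)   # 10 bits round up to 2 bytes per row
    assert byte_range(1, 1, 10) == (2, 4)
    assert byte_range(2, 8, 3) == (6, 9)    # 8-bit depth, 3 pixels -> 3 bytes per row
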
class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
+ def decompile(self):
+ self.imageData = self.data
- def decompile(self):
- self.imageData = self.data
-
- def compile(self, ttFont):
- return self.imageData
+ def compile(self, ttFont):
+ return self.imageData
-class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
+class ebdt_bitmap_format_6(
+ ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
- def compile(self, ttFont):
- data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
- return data + self.imageData
+ def compile(self, ttFont):
+ data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
-class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
+class ebdt_bitmap_format_7(
+ BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
-
- def compile(self, ttFont):
- data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
- return data + self.imageData
+ def compile(self, ttFont):
+ data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
class ComponentBitmapGlyph(BitmapGlyph):
-
- def toXML(self, strikeIndex, glyphName, writer, ttFont):
- writer.begintag(self.__class__.__name__, [('name', glyphName)])
- writer.newline()
-
- self.writeMetrics(writer, ttFont)
-
- writer.begintag('components')
- writer.newline()
- for curComponent in self.componentArray:
- curComponent.toXML(writer, ttFont)
- writer.endtag('components')
- writer.newline()
-
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.readMetrics(name, attrs, content, ttFont)
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- if name == 'components':
- self.componentArray = []
- for compElement in content:
- if not isinstance(compElement, tuple):
- continue
- name, attrs, content = compElement
- if name == 'ebdtComponent':
- curComponent = EbdtComponent()
- curComponent.fromXML(name, attrs, content, ttFont)
- self.componentArray.append(curComponent)
- else:
- log.warning("'%s' being ignored in component array.", name)
+ def toXML(self, strikeIndex, glyphName, writer, ttFont):
+ writer.begintag(self.__class__.__name__, [("name", glyphName)])
+ writer.newline()
+
+ self.writeMetrics(writer, ttFont)
+
+ writer.begintag("components")
+ writer.newline()
+ for curComponent in self.componentArray:
+ curComponent.toXML(writer, ttFont)
+ writer.endtag("components")
+ writer.newline()
+
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.readMetrics(name, attrs, content, ttFont)
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ if name == "components":
+ self.componentArray = []
+ for compElement in content:
+ if not isinstance(compElement, tuple):
+ continue
+ name, attrs, content = compElement
+ if name == "ebdtComponent":
+ curComponent = EbdtComponent()
+ curComponent.fromXML(name, attrs, content, ttFont)
+ self.componentArray.append(curComponent)
+ else:
+ log.warning("'%s' being ignored in component array.", name)
class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
-
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- data = data[1:]
-
- (numComponents,) = struct.unpack(">H", data[:2])
- data = data[2:]
- self.componentArray = []
- for i in range(numComponents):
- curComponent = EbdtComponent()
- dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
- curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
- self.componentArray.append(curComponent)
-
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
- dataList.append(b'\0')
- dataList.append(struct.pack(">H", len(self.componentArray)))
- for curComponent in self.componentArray:
- curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
- dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
- return bytesjoin(dataList)
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ data = data[1:]
+
+ (numComponents,) = struct.unpack(">H", data[:2])
+ data = data[2:]
+ self.componentArray = []
+ for i in range(numComponents):
+ curComponent = EbdtComponent()
+ dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
+ curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
+ self.componentArray.append(curComponent)
+
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
+ dataList.append(b"\0")
+ dataList.append(struct.pack(">H", len(self.componentArray)))
+ for curComponent in self.componentArray:
+ curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
+ dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
+ return bytesjoin(dataList)
class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
-
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- (numComponents,) = struct.unpack(">H", data[:2])
- data = data[2:]
- self.componentArray = []
- for i in range(numComponents):
- curComponent = EbdtComponent()
- dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
- curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
- self.componentArray.append(curComponent)
-
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- dataList.append(struct.pack(">H", len(self.componentArray)))
- for curComponent in self.componentArray:
- curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
- dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
- return bytesjoin(dataList)
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ (numComponents,) = struct.unpack(">H", data[:2])
+ data = data[2:]
+ self.componentArray = []
+ for i in range(numComponents):
+ curComponent = EbdtComponent()
+ dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
+ curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
+ self.componentArray.append(curComponent)
+
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ dataList.append(struct.pack(">H", len(self.componentArray)))
+ for curComponent in self.componentArray:
+ curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
+ dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
+ return bytesjoin(dataList)
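Formats 8 and 9 carry no image data of their own, only metrics plus a list of components (glyph id and x/y offsets); format 8 additionally writes one pad byte between the small metrics and the component count. A hedged sketch of the resulting byte layout, assuming the 5-byte small-metrics record and the 4-byte component record implied by the code above:

    import struct

    # height, width, bearingX, bearingY, advance -- assumed 5-byte layout.
    metrics = struct.pack(">BBbbB", 12, 10, 0, 12, 11)
    components = [(4, 0, 0), (7, 3, -2)]   # (glyphID, xOffset, yOffset)

    data = bytearray()
    data += metrics
    data += b"\x00"                         # pad byte peculiar to format 8
    data += struct.pack(">H", len(components))
    for glyph_id, dx, dy in components:
        data += struct.pack(">Hbb", glyph_id, dx, dy)

    assert len(data) == 5 + 1 + 2 + 2 * 4   # metrics + pad + count + components
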
# Dictionary of bitmap formats to the class representing that format
# currently only the ones listed in this map are the ones supported.
ebdt_bitmap_classes = {
- 1: ebdt_bitmap_format_1,
- 2: ebdt_bitmap_format_2,
- 5: ebdt_bitmap_format_5,
- 6: ebdt_bitmap_format_6,
- 7: ebdt_bitmap_format_7,
- 8: ebdt_bitmap_format_8,
- 9: ebdt_bitmap_format_9,
- }
+ 1: ebdt_bitmap_format_1,
+ 2: ebdt_bitmap_format_2,
+ 5: ebdt_bitmap_format_5,
+ 6: ebdt_bitmap_format_6,
+ 7: ebdt_bitmap_format_7,
+ 8: ebdt_bitmap_format_8,
+ 9: ebdt_bitmap_format_9,
+}
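Because lookups go through getImageFormatClass, a subclass can route extra image formats to its own classes while falling back to this mapping, as the comment on that method suggests. A hedged sketch; format 99 and the subclass are purely hypothetical, and the imported names follow the definitions in this module:

    from fontTools.ttLib.tables.E_B_D_T_ import (
        BitmapGlyph,
        ebdt_bitmap_classes,
        table_E_B_D_T_,
    )

    class ebdt_bitmap_format_99(BitmapGlyph):
        """Hypothetical extra format: raw bytes, no metrics."""

        def decompile(self):
            self.imageData = self.data

        def compile(self, ttFont):
            return self.imageData

    class MyBitmapDataTable(table_E_B_D_T_):
        def getImageFormatClass(self, imageFormat):
            # Route the hypothetical format to our class, defer otherwise.
            if imageFormat == 99:
                return ebdt_bitmap_format_99
            return ebdt_bitmap_classes[imageFormat]
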
diff --git a/Lib/fontTools/ttLib/tables/E_B_L_C_.py b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
index bb3d2140..6046d910 100644
--- a/Lib/fontTools/ttLib/tables/E_B_L_C_.py
+++ b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
@@ -1,7 +1,12 @@
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import bytesjoin, safeEval
-from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
+from .BitmapGlyphMetrics import (
+ BigGlyphMetrics,
+ bigGlyphMetricsFormat,
+ SmallGlyphMetrics,
+ smallGlyphMetricsFormat,
+)
import struct
import itertools
from collections import deque
@@ -59,571 +64,647 @@ indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
+
class table_E_B_L_C_(DefaultTable.DefaultTable):
+ dependencies = ["EBDT"]
+
+ # This method can be overridden in subclasses to support new formats
+ # without changing the other implementation. It can also be used as a
+ # convenience method for converting a font file to an alternative format.
+ def getIndexFormatClass(self, indexFormat):
+ return eblc_sub_table_classes[indexFormat]
+
+ def decompile(self, data, ttFont):
+ # Save the original data because offsets are from the start of the table.
+ origData = data
+ i = 0
+
+ dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
+ i += 8
+
+ self.strikes = []
+ for curStrikeIndex in range(self.numSizes):
+ curStrike = Strike()
+ self.strikes.append(curStrike)
+ curTable = curStrike.bitmapSizeTable
+ dummy = sstruct.unpack2(
+ bitmapSizeTableFormatPart1, data[i : i + 16], curTable
+ )
+ i += 16
+ for metric in ("hori", "vert"):
+ metricObj = SbitLineMetrics()
+ vars(curTable)[metric] = metricObj
+ dummy = sstruct.unpack2(
+ sbitLineMetricsFormat, data[i : i + 12], metricObj
+ )
+ i += 12
+ dummy = sstruct.unpack(
+ bitmapSizeTableFormatPart2, data[i : i + 8], curTable
+ )
+ i += 8
+
+ for curStrike in self.strikes:
+ curTable = curStrike.bitmapSizeTable
+ for subtableIndex in range(curTable.numberOfIndexSubTables):
+ i = (
+ curTable.indexSubTableArrayOffset
+ + subtableIndex * indexSubTableArraySize
+ )
+
+ tup = struct.unpack(
+ indexSubTableArrayFormat, data[i : i + indexSubTableArraySize]
+ )
+ (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
+ i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
+
+ tup = struct.unpack(
+ indexSubHeaderFormat, data[i : i + indexSubHeaderSize]
+ )
+ (indexFormat, imageFormat, imageDataOffset) = tup
+
+ indexFormatClass = self.getIndexFormatClass(indexFormat)
+ indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont)
+ indexSubTable.firstGlyphIndex = firstGlyphIndex
+ indexSubTable.lastGlyphIndex = lastGlyphIndex
+ indexSubTable.additionalOffsetToIndexSubtable = (
+ additionalOffsetToIndexSubtable
+ )
+ indexSubTable.indexFormat = indexFormat
+ indexSubTable.imageFormat = imageFormat
+ indexSubTable.imageDataOffset = imageDataOffset
+ indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
+ curStrike.indexSubTables.append(indexSubTable)
+
+ def compile(self, ttFont):
+ dataList = []
+ self.numSizes = len(self.strikes)
+ dataList.append(sstruct.pack(eblcHeaderFormat, self))
+
+ # Data size of the header + bitmapSizeTable needs to be calculated
+ # in order to form offsets. This value will hold the size of the data
+ # in dataList after all the data is consolidated in dataList.
+ dataSize = len(dataList[0])
+
+ # The table will be structured in the following order:
+ # (0) header
+ # (1) Each bitmapSizeTable [1 ... self.numSizes]
+ # (2) Alternate between indexSubTableArray and indexSubTable
+ # for each bitmapSizeTable present.
+ #
+ # The issue is maintaining the proper offsets when table information
+ # gets moved around. All offsets and size information must be recalculated
+ # when building the table to allow editing within ttLib and also allow easy
+ # import/export to and from XML. All of this offset information is lost
+ # when exporting to XML so everything must be calculated fresh so importing
+ # from XML will work cleanly. Only byte offset and size information is
+ # when exporting to XML, so everything must be calculated fresh so that importing
+ # checked through assertions. If the information in this table was not
+ # touched or was changed properly then these types of values should match.
+ #
+ # The table will be rebuilt the following way:
+ # (0) Precompute the size of all the bitmapSizeTables. This is needed to
+ # compute the offsets properly.
+ # (1) For each bitmapSizeTable compute the indexSubTable and
+ # indexSubTableArray pair. The indexSubTable must be computed first
+ # so that the offset information in indexSubTableArray can be
+ # calculated. Update the data size after each pairing.
+ # (2) Build each bitmapSizeTable.
+ # (3) Consolidate all the data into the main dataList in the correct order.
+
+ for _ in self.strikes:
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
+ dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat)
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
+
+ indexSubTablePairDataList = []
+ for curStrike in self.strikes:
+ curTable = curStrike.bitmapSizeTable
+ curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
+ curTable.indexSubTableArrayOffset = dataSize
+
+ # Precompute the size of the indexSubTableArray. This information
+ # is important for correctly calculating the new value for
+ # additionalOffsetToIndexSubtable.
+ sizeOfSubTableArray = (
+ curTable.numberOfIndexSubTables * indexSubTableArraySize
+ )
+ lowerBound = dataSize
+ dataSize += sizeOfSubTableArray
+ upperBound = dataSize
+
+ indexSubTableDataList = []
+ for indexSubTable in curStrike.indexSubTables:
+ indexSubTable.additionalOffsetToIndexSubtable = (
+ dataSize - curTable.indexSubTableArrayOffset
+ )
+ glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
+ indexSubTable.firstGlyphIndex = min(glyphIds)
+ indexSubTable.lastGlyphIndex = max(glyphIds)
+ data = indexSubTable.compile(ttFont)
+ indexSubTableDataList.append(data)
+ dataSize += len(data)
+ curTable.startGlyphIndex = min(
+ ist.firstGlyphIndex for ist in curStrike.indexSubTables
+ )
+ curTable.endGlyphIndex = max(
+ ist.lastGlyphIndex for ist in curStrike.indexSubTables
+ )
+
+ for i in curStrike.indexSubTables:
+ data = struct.pack(
+ indexSubHeaderFormat,
+ i.firstGlyphIndex,
+ i.lastGlyphIndex,
+ i.additionalOffsetToIndexSubtable,
+ )
+ indexSubTablePairDataList.append(data)
+ indexSubTablePairDataList.extend(indexSubTableDataList)
+ curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
+
+ for curStrike in self.strikes:
+ curTable = curStrike.bitmapSizeTable
+ data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
+ dataList.append(data)
+ for metric in ("hori", "vert"):
+ metricObj = vars(curTable)[metric]
+ data = sstruct.pack(sbitLineMetricsFormat, metricObj)
+ dataList.append(data)
+ data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
+ dataList.append(data)
+ dataList.extend(indexSubTablePairDataList)
+
+ return bytesjoin(dataList)
+
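The dataSize that the offset bookkeeping above starts from is fixed: an 8-byte header plus, per strike, the 16 + 2*12 + 8 bytes consumed by the three sstruct formats (matching the slice sizes used in decompile). A quick check of that arithmetic:

    # Fixed-size EBLC prefix that indexSubTableArrayOffset values sit past:
    # 8-byte header plus 48 bytes per strike (16 + two 12-byte sbitLineMetrics + 8).
    def eblc_fixed_size(numSizes):
        header = 8
        per_strike = 16 + 2 * 12 + 8
        return header + numSizes * per_strike

    assert eblc_fixed_size(1) == 56
    assert eblc_fixed_size(3) == 152   # 8 + 3 * 48
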
+ def toXML(self, writer, ttFont):
+ writer.simpletag("header", [("version", self.version)])
+ writer.newline()
+ for curIndex, curStrike in enumerate(self.strikes):
+ curStrike.toXML(curIndex, writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "header":
+ self.version = safeEval(attrs["version"])
+ elif name == "strike":
+ if not hasattr(self, "strikes"):
+ self.strikes = []
+ strikeIndex = safeEval(attrs["index"])
+ curStrike = Strike()
+ curStrike.fromXML(name, attrs, content, ttFont, self)
+
+ # Grow the strike array to the appropriate size. The XML format
+ # allows for the strike index value to be out of order.
+ if strikeIndex >= len(self.strikes):
+ self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
+ assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
+ self.strikes[strikeIndex] = curStrike
- dependencies = ['EBDT']
-
- # This method can be overridden in subclasses to support new formats
- # without changing the other implementation. Also can be used as a
- # convenience method for coverting a font file to an alternative format.
- def getIndexFormatClass(self, indexFormat):
- return eblc_sub_table_classes[indexFormat]
-
- def decompile(self, data, ttFont):
-
- # Save the original data because offsets are from the start of the table.
- origData = data
- i = 0;
-
- dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
- i += 8;
-
- self.strikes = []
- for curStrikeIndex in range(self.numSizes):
- curStrike = Strike()
- self.strikes.append(curStrike)
- curTable = curStrike.bitmapSizeTable
- dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable)
- i += 16
- for metric in ('hori', 'vert'):
- metricObj = SbitLineMetrics()
- vars(curTable)[metric] = metricObj
- dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj)
- i += 12
- dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable)
- i += 8
-
- for curStrike in self.strikes:
- curTable = curStrike.bitmapSizeTable
- for subtableIndex in range(curTable.numberOfIndexSubTables):
- i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
-
- tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize])
- (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
- i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
-
- tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize])
- (indexFormat, imageFormat, imageDataOffset) = tup
-
- indexFormatClass = self.getIndexFormatClass(indexFormat)
- indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont)
- indexSubTable.firstGlyphIndex = firstGlyphIndex
- indexSubTable.lastGlyphIndex = lastGlyphIndex
- indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
- indexSubTable.indexFormat = indexFormat
- indexSubTable.imageFormat = imageFormat
- indexSubTable.imageDataOffset = imageDataOffset
- indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
- curStrike.indexSubTables.append(indexSubTable)
-
- def compile(self, ttFont):
-
- dataList = []
- self.numSizes = len(self.strikes)
- dataList.append(sstruct.pack(eblcHeaderFormat, self))
-
- # Data size of the header + bitmapSizeTable needs to be calculated
- # in order to form offsets. This value will hold the size of the data
- # in dataList after all the data is consolidated in dataList.
- dataSize = len(dataList[0])
-
- # The table will be structured in the following order:
- # (0) header
- # (1) Each bitmapSizeTable [1 ... self.numSizes]
- # (2) Alternate between indexSubTableArray and indexSubTable
- # for each bitmapSizeTable present.
- #
- # The issue is maintaining the proper offsets when table information
- # gets moved around. All offsets and size information must be recalculated
- # when building the table to allow editing within ttLib and also allow easy
- # import/export to and from XML. All of this offset information is lost
- # when exporting to XML so everything must be calculated fresh so importing
- # from XML will work cleanly. Only byte offset and size information is
- # calculated fresh. Count information like numberOfIndexSubTables is
- # checked through assertions. If the information in this table was not
- # touched or was changed properly then these types of values should match.
- #
- # The table will be rebuilt the following way:
- # (0) Precompute the size of all the bitmapSizeTables. This is needed to
- # compute the offsets properly.
- # (1) For each bitmapSizeTable compute the indexSubTable and
- # indexSubTableArray pair. The indexSubTable must be computed first
- # so that the offset information in indexSubTableArray can be
- # calculated. Update the data size after each pairing.
- # (2) Build each bitmapSizeTable.
- # (3) Consolidate all the data into the main dataList in the correct order.
-
- for _ in self.strikes:
- dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
- dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
- dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
-
- indexSubTablePairDataList = []
- for curStrike in self.strikes:
- curTable = curStrike.bitmapSizeTable
- curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
- curTable.indexSubTableArrayOffset = dataSize
-
- # Precompute the size of the indexSubTableArray. This information
- # is important for correctly calculating the new value for
- # additionalOffsetToIndexSubtable.
- sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
- lowerBound = dataSize
- dataSize += sizeOfSubTableArray
- upperBound = dataSize
-
- indexSubTableDataList = []
- for indexSubTable in curStrike.indexSubTables:
- indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
- glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
- indexSubTable.firstGlyphIndex = min(glyphIds)
- indexSubTable.lastGlyphIndex = max(glyphIds)
- data = indexSubTable.compile(ttFont)
- indexSubTableDataList.append(data)
- dataSize += len(data)
- curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
- curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)
-
- for i in curStrike.indexSubTables:
- data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
- indexSubTablePairDataList.append(data)
- indexSubTablePairDataList.extend(indexSubTableDataList)
- curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
-
- for curStrike in self.strikes:
- curTable = curStrike.bitmapSizeTable
- data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
- dataList.append(data)
- for metric in ('hori', 'vert'):
- metricObj = vars(curTable)[metric]
- data = sstruct.pack(sbitLineMetricsFormat, metricObj)
- dataList.append(data)
- data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
- dataList.append(data)
- dataList.extend(indexSubTablePairDataList)
-
- return bytesjoin(dataList)
-
- def toXML(self, writer, ttFont):
- writer.simpletag('header', [('version', self.version)])
- writer.newline()
- for curIndex, curStrike in enumerate(self.strikes):
- curStrike.toXML(curIndex, writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == 'header':
- self.version = safeEval(attrs['version'])
- elif name == 'strike':
- if not hasattr(self, 'strikes'):
- self.strikes = []
- strikeIndex = safeEval(attrs['index'])
- curStrike = Strike()
- curStrike.fromXML(name, attrs, content, ttFont, self)
-
- # Grow the strike array to the appropriate size. The XML format
- # allows for the strike index value to be out of order.
- if strikeIndex >= len(self.strikes):
- self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
- assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
- self.strikes[strikeIndex] = curStrike
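
The long comment in the old compile() above spells out the rebuild order: header, then every bitmapSizeTable, then the indexSubTableArray/indexSubTable pairs, with all offsets recomputed from scratch. The fixed-size part of that layout follows directly from the slice widths used in decompile (an 8-byte header, then 16 + 2 * 12 + 8 bytes per bitmapSizeTable). A small bookkeeping sketch with a hypothetical strike count:

    # Where the first strike's indexSubTableArray lands, using only the
    # struct sizes visible in decompile above (numSizes is made up).
    numSizes = 2
    headerSize = 8                          # eblcHeaderFormat
    bitmapSizeTableSize = 16 + 2 * 12 + 8   # part1 + hori/vert metrics + part2
    firstIndexSubTableArrayOffset = headerSize + numSizes * bitmapSizeTableSize
    print(firstIndexSubTableArrayOffset)    # 104
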
class Strike(object):
-
- def __init__(self):
- self.bitmapSizeTable = BitmapSizeTable()
- self.indexSubTables = []
-
- def toXML(self, strikeIndex, writer, ttFont):
- writer.begintag('strike', [('index', strikeIndex)])
- writer.newline()
- self.bitmapSizeTable.toXML(writer, ttFont)
- writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
- writer.newline()
- for indexSubTable in self.indexSubTables:
- indexSubTable.toXML(writer, ttFont)
- writer.endtag('strike')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont, locator):
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'bitmapSizeTable':
- self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
- elif name.startswith(_indexSubTableSubclassPrefix):
- indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
- indexFormatClass = locator.getIndexFormatClass(indexFormat)
- indexSubTable = indexFormatClass(None, None)
- indexSubTable.indexFormat = indexFormat
- indexSubTable.fromXML(name, attrs, content, ttFont)
- self.indexSubTables.append(indexSubTable)
+ def __init__(self):
+ self.bitmapSizeTable = BitmapSizeTable()
+ self.indexSubTables = []
+
+ def toXML(self, strikeIndex, writer, ttFont):
+ writer.begintag("strike", [("index", strikeIndex)])
+ writer.newline()
+ self.bitmapSizeTable.toXML(writer, ttFont)
+ writer.comment(
+ "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler."
+ )
+ writer.newline()
+ for indexSubTable in self.indexSubTables:
+ indexSubTable.toXML(writer, ttFont)
+ writer.endtag("strike")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont, locator):
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "bitmapSizeTable":
+ self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
+ elif name.startswith(_indexSubTableSubclassPrefix):
+ indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix) :])
+ indexFormatClass = locator.getIndexFormatClass(indexFormat)
+ indexSubTable = indexFormatClass(None, None)
+ indexSubTable.indexFormat = indexFormat
+ indexSubTable.fromXML(name, attrs, content, ttFont)
+ self.indexSubTables.append(indexSubTable)
class BitmapSizeTable(object):
-
- # Returns all the simple metric names that bitmap size table
- # cares about in terms of XML creation.
- def _getXMLMetricNames(self):
- dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
- dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
- # Skip the first 3 data names because they are byte offsets and counts.
- return dataNames[3:]
-
- def toXML(self, writer, ttFont):
- writer.begintag('bitmapSizeTable')
- writer.newline()
- for metric in ('hori', 'vert'):
- getattr(self, metric).toXML(metric, writer, ttFont)
- for metricName in self._getXMLMetricNames():
- writer.simpletag(metricName, value=getattr(self, metricName))
- writer.newline()
- writer.endtag('bitmapSizeTable')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- # Create a lookup for all the simple names that make sense to
- # bitmap size table. Only read the information from these names.
- dataNames = set(self._getXMLMetricNames())
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'sbitLineMetrics':
- direction = attrs['direction']
- assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
- metricObj = SbitLineMetrics()
- metricObj.fromXML(name, attrs, content, ttFont)
- vars(self)[direction] = metricObj
- elif name in dataNames:
- vars(self)[name] = safeEval(attrs['value'])
- else:
- log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
+ # Returns all the simple metric names that bitmap size table
+ # cares about in terms of XML creation.
+ def _getXMLMetricNames(self):
+ dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
+ dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
+ # Skip the first 3 data names because they are byte offsets and counts.
+ return dataNames[3:]
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("bitmapSizeTable")
+ writer.newline()
+ for metric in ("hori", "vert"):
+ getattr(self, metric).toXML(metric, writer, ttFont)
+ for metricName in self._getXMLMetricNames():
+ writer.simpletag(metricName, value=getattr(self, metricName))
+ writer.newline()
+ writer.endtag("bitmapSizeTable")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ # Create a lookup for all the simple names that make sense to
+ # bitmap size table. Only read the information from these names.
+ dataNames = set(self._getXMLMetricNames())
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "sbitLineMetrics":
+ direction = attrs["direction"]
+ assert direction in (
+ "hori",
+ "vert",
+ ), "SbitLineMetrics direction specified invalid."
+ metricObj = SbitLineMetrics()
+ metricObj.fromXML(name, attrs, content, ttFont)
+ vars(self)[direction] = metricObj
+ elif name in dataNames:
+ vars(self)[name] = safeEval(attrs["value"])
+ else:
+ log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
class SbitLineMetrics(object):
+ def toXML(self, name, writer, ttFont):
+ writer.begintag("sbitLineMetrics", [("direction", name)])
+ writer.newline()
+ for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
+ writer.simpletag(metricName, value=getattr(self, metricName))
+ writer.newline()
+ writer.endtag("sbitLineMetrics")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name in metricNames:
+ vars(self)[name] = safeEval(attrs["value"])
- def toXML(self, name, writer, ttFont):
- writer.begintag('sbitLineMetrics', [('direction', name)])
- writer.newline()
- for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
- writer.simpletag(metricName, value=getattr(self, metricName))
- writer.newline()
- writer.endtag('sbitLineMetrics')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name in metricNames:
- vars(self)[name] = safeEval(attrs['value'])
# Important information about the naming scheme. Used for identifying subtables.
-_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
+_indexSubTableSubclassPrefix = "eblc_index_sub_table_"
+
class EblcIndexSubTable(object):
+ def __init__(self, data, ttFont):
+ self.data = data
+ self.ttFont = ttFont
+ # TODO Currently non-lazy decompiling doesn't work for this class...
+ # if not ttFont.lazy:
+ # self.decompile()
+ # del self.data, self.ttFont
+
+ def __getattr__(self, attr):
+ # Allow lazy decompile.
+ if attr[:2] == "__":
+ raise AttributeError(attr)
+ if attr == "data":
+ raise AttributeError(attr)
+ self.decompile()
+ return getattr(self, attr)
+
+ def ensureDecompiled(self, recurse=False):
+ if hasattr(self, "data"):
+ self.decompile()
+
+ # This method just takes care of the indexSubHeader. Implementing subclasses
+ # should call it to compile the indexSubHeader and then continue compiling
+ # the remainder of their unique format.
+ def compile(self, ttFont):
+ return struct.pack(
+ indexSubHeaderFormat,
+ self.indexFormat,
+ self.imageFormat,
+ self.imageDataOffset,
+ )
+
+ # Creates the XML for bitmap glyphs. Each index sub table basically makes
+ # the same XML except for specific metric information that is written
+ # out via a method call that a subclass implements optionally.
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("imageFormat", self.imageFormat),
+ ("firstGlyphIndex", self.firstGlyphIndex),
+ ("lastGlyphIndex", self.lastGlyphIndex),
+ ],
+ )
+ writer.newline()
+ self.writeMetrics(writer, ttFont)
+ # Write out the names, as that's all that's needed to rebuild etc.
+ # For font debugging of consecutive formats the ids are also written.
+ # The ids are not read when moving from the XML format.
+ glyphIds = map(ttFont.getGlyphID, self.names)
+ for glyphName, glyphId in zip(self.names, glyphIds):
+ writer.simpletag("glyphLoc", name=glyphName, id=glyphId)
+ writer.newline()
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ # Read all the attributes. Even though the glyph indices are
+ # recalculated, they are still read in case there needs to
+ # be an immediate export of the data.
+ self.imageFormat = safeEval(attrs["imageFormat"])
+ self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
+ self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])
+
+ self.readMetrics(name, attrs, content, ttFont)
+
+ self.names = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "glyphLoc":
+ self.names.append(attrs["name"])
+
+ # A helper method that writes the metrics for the index sub table. It also
+ # is responsible for writing the image size for fixed size data since fixed
+ # size is not recalculated on compile. Default behavior is to do nothing.
+ def writeMetrics(self, writer, ttFont):
+ pass
+
+ # A helper method that is the inverse of writeMetrics.
+ def readMetrics(self, name, attrs, content, ttFont):
+ pass
+
+ # This method is for fixed glyph data sizes. There are formats where the
+ # glyph data is fixed in size but the glyphs are actually composites. To
+ # handle this, the font spec has the indexSubTable pad the component arrays
+ # out to the fixed size. This function abstracts that padding process:
+ # input is unpadded data, output is data padded only in fixed formats.
+ # Default behavior is to return the data unchanged.
+ def padBitmapData(self, data):
+ return data
+
+ # Remove any of the glyph locations and names that are flagged as skipped.
+ # This only occurs in formats {1,3}.
+ def removeSkipGlyphs(self):
+ # Determines if a name, location pair is a valid data location.
+ # Skip glyphs are marked when the size is equal to zero.
+ def isValidLocation(args):
+ (name, (startByte, endByte)) = args
+ return startByte < endByte
+
+ # Remove all skip glyphs.
+ dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
+ self.names, self.locations = list(map(list, zip(*dataPairs)))
- def __init__(self, data, ttFont):
- self.data = data
- self.ttFont = ttFont
- # TODO Currently non-lazy decompiling doesn't work for this class...
- #if not ttFont.lazy:
- # self.decompile()
- # del self.data, self.ttFont
-
- def __getattr__(self, attr):
- # Allow lazy decompile.
- if attr[:2] == '__':
- raise AttributeError(attr)
- if attr == "data":
- raise AttributeError(attr)
- self.decompile()
- return getattr(self, attr)
-
- def ensureDecompiled(self, recurse=False):
- if hasattr(self, "data"):
- self.decompile()
-
- # This method just takes care of the indexSubHeader. Implementing subclasses
- # should call it to compile the indexSubHeader and then continue compiling
- # the remainder of their unique format.
- def compile(self, ttFont):
- return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
-
- # Creates the XML for bitmap glyphs. Each index sub table basically makes
- # the same XML except for specific metric information that is written
- # out via a method call that a subclass implements optionally.
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ('imageFormat', self.imageFormat),
- ('firstGlyphIndex', self.firstGlyphIndex),
- ('lastGlyphIndex', self.lastGlyphIndex),
- ])
- writer.newline()
- self.writeMetrics(writer, ttFont)
- # Write out the names, as that's all that's needed to rebuild etc.
- # For font debugging of consecutive formats the ids are also written.
- # The ids are not read when moving from the XML format.
- glyphIds = map(ttFont.getGlyphID, self.names)
- for glyphName, glyphId in zip(self.names, glyphIds):
- writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
- writer.newline()
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- # Read all the attributes. Even though the glyph indices are
- # recalculated, they are still read in case there needs to
- # be an immediate export of the data.
- self.imageFormat = safeEval(attrs['imageFormat'])
- self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
- self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
-
- self.readMetrics(name, attrs, content, ttFont)
-
- self.names = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'glyphLoc':
- self.names.append(attrs['name'])
-
- # A helper method that writes the metrics for the index sub table. It also
- # is responsible for writing the image size for fixed size data since fixed
- # size is not recalculated on compile. Default behavior is to do nothing.
- def writeMetrics(self, writer, ttFont):
- pass
-
- # A helper method that is the inverse of writeMetrics.
- def readMetrics(self, name, attrs, content, ttFont):
- pass
-
- # This method is for fixed glyph data sizes. There are formats where the
- # glyph data is fixed in size but the glyphs are actually composites. To
- # handle this, the font spec has the indexSubTable pad the component arrays
- # out to the fixed size. This function abstracts that padding process:
- # input is unpadded data, output is data padded only in fixed formats.
- # Default behavior is to return the data unchanged.
- def padBitmapData(self, data):
- return data
-
- # Remove any of the glyph locations and names that are flagged as skipped.
- # This only occurs in formats {1,3}.
- def removeSkipGlyphs(self):
- # Determines if a name, location pair is a valid data location.
- # Skip glyphs are marked when the size is equal to zero.
- def isValidLocation(args):
- (name, (startByte, endByte)) = args
- return startByte < endByte
- # Remove all skip glyphs.
- dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
- self.names, self.locations = list(map(list, zip(*dataPairs)))
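
The __getattr__ hook above is what makes these index subtables lazy: the raw data and the owning ttFont are kept until some attribute other than data is first requested, at which point decompile() runs and deletes both. A minimal sketch of the same pattern on a hypothetical toy class (not fontTools API):

    import struct

    class LazyRecord(object):
        def __init__(self, data):
            self.data = data               # raw bytes; parsing is deferred

        def __getattr__(self, attr):
            # Only called for missing attributes; dunder names and 'data'
            # itself must not trigger a decompile.
            if attr[:2] == "__" or attr == "data":
                raise AttributeError(attr)
            self.decompile()
            return getattr(self, attr)

        def decompile(self):
            (self.value,) = struct.unpack(">H", self.data)
            del self.data                  # mirrors 'del self.data, self.ttFont'

    record = LazyRecord(b"\x00\x2a")
    print(record.value)                    # decompiles on first access: 42
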
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
+ # Prep the data size for the offset array data format.
+ dataFormat = ">" + formatStringForDataType
+ offsetDataSize = struct.calcsize(dataFormat)
+
+ class OffsetArrayIndexSubTableMixin(object):
+ def decompile(self):
+ numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
+ indexingOffsets = [
+ glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
+ ]
+ indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
+ offsetArray = [
+ struct.unpack(dataFormat, self.data[slice(*loc)])[0]
+ for loc in indexingLocations
+ ]
+
+ glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
+ modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
+ self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
+
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ self.removeSkipGlyphs()
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ # First make sure that all the data lines up properly. Formats 1 and 3
+ # must have all their data laid out consecutively. If not, this will fail.
+ for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
+ assert (
+ curLoc[1] == nxtLoc[0]
+ ), "Data must be consecutive in indexSubTable offset formats"
+
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ # Make sure that all ids are sorted strictly increasing.
+ assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))
+
+ # Run a simple algorithm to add skip glyphs to the data locations at
+ # the places where an id is not present.
+ idQueue = deque(glyphIds)
+ locQueue = deque(self.locations)
+ allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
+ allLocations = []
+ for curId in allGlyphIds:
+ if curId != idQueue[0]:
+ allLocations.append((locQueue[0][0], locQueue[0][0]))
+ else:
+ idQueue.popleft()
+ allLocations.append(locQueue.popleft())
+
+ # Now that all the locations are collected, pack them appropriately into
+ # offsets. This is the form where offset[i] is the location and
+ # offset[i+1]-offset[i] is the size of the data location.
+ offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
+ # Image data offset must be less than or equal to the minimum of locations.
+ # This offset may change the value for round tripping but is safer and
+ # allows imageDataOffset to not be required to be in the XML version.
+ self.imageDataOffset = min(offsets)
+ offsetArray = [offset - self.imageDataOffset for offset in offsets]
+
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList += [
+ struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
+ ]
+ # Take care of any padding issues. Only occurs in format 3.
+ if offsetDataSize * len(offsetArray) % 4 != 0:
+ dataList.append(struct.pack(dataFormat, 0))
+ return bytesjoin(dataList)
+
+ return OffsetArrayIndexSubTableMixin
- # Prep the data size for the offset array data format.
- dataFormat = '>'+formatStringForDataType
- offsetDataSize = struct.calcsize(dataFormat)
-
- class OffsetArrayIndexSubTableMixin(object):
-
- def decompile(self):
-
- numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
- indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
- indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
- offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
-
- glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
- modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
- self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
-
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- self.removeSkipGlyphs()
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- # First make sure that all the data lines up properly. Formats 1 and 3
- # must have all their data laid out consecutively. If not, this will fail.
- for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
- assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
-
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- # Make sure that all ids are sorted strictly increasing.
- assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
-
- # Run a simple algorithm to add skip glyphs to the data locations at
- # the places where an id is not present.
- idQueue = deque(glyphIds)
- locQueue = deque(self.locations)
- allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
- allLocations = []
- for curId in allGlyphIds:
- if curId != idQueue[0]:
- allLocations.append((locQueue[0][0], locQueue[0][0]))
- else:
- idQueue.popleft()
- allLocations.append(locQueue.popleft())
-
- # Now that all the locations are collected, pack them appropriately into
- # offsets. This is the form where offset[i] is the location and
- # offset[i+1]-offset[i] is the size of the data location.
- offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
- # Image data offset must be less than or equal to the minimum of locations.
- # This offset may change the value for round tripping but is safer and
- # allows imageDataOffset to not be required to be in the XML version.
- self.imageDataOffset = min(offsets)
- offsetArray = [offset - self.imageDataOffset for offset in offsets]
-
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
- # Take care of any padding issues. Only occurs in format 3.
- if offsetDataSize * len(offsetArray) % 4 != 0:
- dataList.append(struct.pack(dataFormat, 0))
- return bytesjoin(dataList)
-
- return OffsetArrayIndexSubTableMixin
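
This closure exists because index formats 1 and 3 differ only in the width of each offset entry: format 1 stores 32-bit offsets ("L"), format 3 stores 16-bit offsets ("H"), which is also why only format 3 ever needs the trailing pad up to a 4-byte boundary. A small illustration with made-up offset values (not fontTools API):

    import struct

    for indexFormat, formatString in ((1, "L"), (3, "H")):
        dataFormat = ">" + formatString
        offsetDataSize = struct.calcsize(dataFormat)          # 4 for format 1, 2 for format 3
        offsetArray = [0, 10, 10, 24]                         # four offsets describe three glyphs
        locations = list(zip(offsetArray, offsetArray[1:]))
        kept = [loc for loc in locations if loc[0] < loc[1]]  # same test as removeSkipGlyphs
        print(indexFormat, offsetDataSize, len(locations), len(kept))

The zero-length pair in the middle is exactly the kind of "skip glyph" that removeSkipGlyphs() filters out after decompiling.
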
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
+ def writeMetrics(self, writer, ttFont):
+ writer.simpletag("imageSize", value=self.imageSize)
+ writer.newline()
+ self.metrics.toXML(writer, ttFont)
+
+ def readMetrics(self, name, attrs, content, ttFont):
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "imageSize":
+ self.imageSize = safeEval(attrs["value"])
+ elif name == BigGlyphMetrics.__name__:
+ self.metrics = BigGlyphMetrics()
+ self.metrics.fromXML(name, attrs, content, ttFont)
+ elif name == SmallGlyphMetrics.__name__:
+ log.warning(
+ "SmallGlyphMetrics being ignored in format %d.", self.indexFormat
+ )
+
+ def padBitmapData(self, data):
+ # Make sure that the data isn't bigger than the fixed size.
+ assert len(data) <= self.imageSize, (
+ "Data in indexSubTable format %d must be less than the fixed size."
+ % self.indexFormat
+ )
+ # Pad the data so that it matches the fixed size.
+ pad = (self.imageSize - len(data)) * b"\0"
+ return data + pad
+
+
+class eblc_index_sub_table_1(
+ _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable
+):
+ pass
- def writeMetrics(self, writer, ttFont):
- writer.simpletag('imageSize', value=self.imageSize)
- writer.newline()
- self.metrics.toXML(writer, ttFont)
-
- def readMetrics(self, name, attrs, content, ttFont):
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'imageSize':
- self.imageSize = safeEval(attrs['value'])
- elif name == BigGlyphMetrics.__name__:
- self.metrics = BigGlyphMetrics()
- self.metrics.fromXML(name, attrs, content, ttFont)
- elif name == SmallGlyphMetrics.__name__:
- log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat)
-
- def padBitmapData(self, data):
- # Make sure that the data isn't bigger than the fixed size.
- assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
- # Pad the data so that it matches the fixed size.
- pad = (self.imageSize - len(data)) * b'\0'
- return data + pad
-
-class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
- pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
+ def decompile(self):
+ (self.imageSize,) = struct.unpack(">L", self.data[:4])
+ self.metrics = BigGlyphMetrics()
+ sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
+ glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
+ offsets = [
+ self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
+ ]
+ self.locations = list(zip(offsets, offsets[1:]))
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ # Make sure all the ids are consecutive. This is required by Format 2.
+ assert glyphIds == list(
+ range(self.firstGlyphIndex, self.lastGlyphIndex + 1)
+ ), "Format 2 ids must be consecutive."
+ self.imageDataOffset = min(next(iter(zip(*self.locations))))
+
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList.append(struct.pack(">L", self.imageSize))
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ return bytesjoin(dataList)
+
+
+class eblc_index_sub_table_3(
+ _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable
+):
+ pass
- def decompile(self):
- (self.imageSize,) = struct.unpack(">L", self.data[:4])
- self.metrics = BigGlyphMetrics()
- sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
- glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
- offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
- self.locations = list(zip(offsets, offsets[1:]))
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- # Make sure all the ids are consecutive. This is required by Format 2.
- assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
- self.imageDataOffset = min(next(iter(zip(*self.locations))))
-
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList.append(struct.pack(">L", self.imageSize))
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- return bytesjoin(dataList)
-
-class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
- pass
class eblc_index_sub_table_4(EblcIndexSubTable):
+ def decompile(self):
+ (numGlyphs,) = struct.unpack(">L", self.data[:4])
+ data = self.data[4:]
+ indexingOffsets = [
+ glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs + 2)
+ ]
+ indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
+ glyphArray = [
+ struct.unpack(codeOffsetPairFormat, data[slice(*loc)])
+ for loc in indexingLocations
+ ]
+ glyphIds, offsets = list(map(list, zip(*glyphArray)))
+ # There are one too many glyph ids. Get rid of the last one.
+ glyphIds.pop()
+
+ offsets = [offset + self.imageDataOffset for offset in offsets]
+ self.locations = list(zip(offsets, offsets[1:]))
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ # First make sure that all the data lines up properly. Format 4
+ # must have all its data lined up consecutively. If not this will fail.
+ for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
+ assert (
+ curLoc[1] == nxtLoc[0]
+ ), "Data must be consecutive in indexSubTable format 4"
+
+ offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
+ # Image data offset must be less than or equal to the minimum of locations.
+ # Resetting this offset may change the value for round tripping but is safer
+ # and allows imageDataOffset to not be required to be in the XML version.
+ self.imageDataOffset = min(offsets)
+ offsets = [offset - self.imageDataOffset for offset in offsets]
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ # Create an iterator over the ids plus a padding value.
+ idsPlusPad = list(itertools.chain(glyphIds, [0]))
+
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList.append(struct.pack(">L", len(glyphIds)))
+ tmp = [
+ struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)
+ ]
+ dataList += tmp
+ data = bytesjoin(dataList)
+ return data
- def decompile(self):
-
- (numGlyphs,) = struct.unpack(">L", self.data[:4])
- data = self.data[4:]
- indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
- indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
- glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
- glyphIds, offsets = list(map(list, zip(*glyphArray)))
- # There are one too many glyph ids. Get rid of the last one.
- glyphIds.pop()
-
- offsets = [offset + self.imageDataOffset for offset in offsets]
- self.locations = list(zip(offsets, offsets[1:]))
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- # First make sure that all the data lines up properly. Format 4
- # must have all its data lined up consecutively. If not this will fail.
- for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
- assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"
-
- offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
- # Image data offset must be less than or equal to the minimum of locations.
- # Resetting this offset may change the value for round tripping but is safer
- # and allows imageDataOffset to not be required to be in the XML version.
- self.imageDataOffset = min(offsets)
- offsets = [offset - self.imageDataOffset for offset in offsets]
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- # Create an iterator over the ids plus a padding value.
- idsPlusPad = list(itertools.chain(glyphIds, [0]))
-
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList.append(struct.pack(">L", len(glyphIds)))
- tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
- dataList += tmp
- data = bytesjoin(dataList)
- return data
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
+ def decompile(self):
+ self.origDataLen = 0
+ (self.imageSize,) = struct.unpack(">L", self.data[:4])
+ data = self.data[4:]
+ self.metrics, data = sstruct.unpack2(
+ bigGlyphMetricsFormat, data, BigGlyphMetrics()
+ )
+ (numGlyphs,) = struct.unpack(">L", data[:4])
+ data = data[4:]
+ glyphIds = [
+ struct.unpack(">H", data[2 * i : 2 * (i + 1)])[0] for i in range(numGlyphs)
+ ]
+
+ offsets = [
+ self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
+ ]
+ self.locations = list(zip(offsets, offsets[1:]))
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ self.imageDataOffset = min(next(iter(zip(*self.locations))))
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList.append(struct.pack(">L", self.imageSize))
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ dataList.append(struct.pack(">L", len(glyphIds)))
+ dataList += [struct.pack(">H", curId) for curId in glyphIds]
+ if len(glyphIds) % 2 == 1:
+ dataList.append(struct.pack(">H", 0))
+ return bytesjoin(dataList)
- def decompile(self):
- self.origDataLen = 0
- (self.imageSize,) = struct.unpack(">L", self.data[:4])
- data = self.data[4:]
- self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
- (numGlyphs,) = struct.unpack(">L", data[:4])
- data = data[4:]
- glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
-
- offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
- self.locations = list(zip(offsets, offsets[1:]))
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- self.imageDataOffset = min(next(iter(zip(*self.locations))))
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList.append(struct.pack(">L", self.imageSize))
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- dataList.append(struct.pack(">L", len(glyphIds)))
- dataList += [struct.pack(">H", curId) for curId in glyphIds]
- if len(glyphIds) % 2 == 1:
- dataList.append(struct.pack(">H", 0))
- return bytesjoin(dataList)
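
Formats 2 and 5 share the fixed-size location math seen in both decompile() methods above: every glyph occupies exactly imageSize bytes starting at imageDataOffset, so the locations are consecutive, equally sized windows. A sketch with hypothetical numbers:

    imageSize = 18
    imageDataOffset = 1000
    numGlyphs = 3
    offsets = [imageSize * i + imageDataOffset for i in range(numGlyphs + 1)]
    locations = list(zip(offsets, offsets[1:]))
    print(locations)   # [(1000, 1018), (1018, 1036), (1036, 1054)]

Format 5 additionally appends a zero uint16 when the glyph-id count is odd, as its compile() above shows, keeping the subtable length a multiple of four bytes.
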
# Dictionary of indexFormat to the class representing that format.
eblc_sub_table_classes = {
- 1: eblc_index_sub_table_1,
- 2: eblc_index_sub_table_2,
- 3: eblc_index_sub_table_3,
- 4: eblc_index_sub_table_4,
- 5: eblc_index_sub_table_5,
- }
+ 1: eblc_index_sub_table_1,
+ 2: eblc_index_sub_table_2,
+ 3: eblc_index_sub_table_3,
+ 4: eblc_index_sub_table_4,
+ 5: eblc_index_sub_table_5,
+}
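
The reformatting above does not change the public shape of the table, so the usual ttLib access pattern still applies. A minimal usage sketch, assuming a hypothetical "bitmap.ttf" that actually contains EBLC/EBDT data:

    from fontTools.ttLib import TTFont

    font = TTFont("bitmap.ttf")
    eblc = font["EBLC"]
    for strikeIndex, strike in enumerate(eblc.strikes):
        bst = strike.bitmapSizeTable
        print("strike", strikeIndex, "glyph range", bst.startGlyphIndex, "-", bst.endGlyphIndex)
        for indexSubTable in strike.indexSubTables:
            # Accessing .names triggers the lazy decompile described above.
            print("  indexFormat", indexSubTable.indexFormat,
                  "imageFormat", indexSubTable.imageFormat,
                  "glyphs", len(indexSubTable.names))
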
diff --git a/Lib/fontTools/ttLib/tables/F_F_T_M_.py b/Lib/fontTools/ttLib/tables/F_F_T_M_.py
index 2376f2db..823ced1b 100644
--- a/Lib/fontTools/ttLib/tables/F_F_T_M_.py
+++ b/Lib/fontTools/ttLib/tables/F_F_T_M_.py
@@ -11,30 +11,32 @@ FFTMFormat = """
sourceModified: Q
"""
-class table_F_F_T_M_(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- dummy, rest = sstruct.unpack2(FFTMFormat, data, self)
+class table_F_F_T_M_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ dummy, rest = sstruct.unpack2(FFTMFormat, data, self)
- def compile(self, ttFont):
- data = sstruct.pack(FFTMFormat, self)
- return data
+ def compile(self, ttFont):
+ data = sstruct.pack(FFTMFormat, self)
+ return data
- def toXML(self, writer, ttFont):
- writer.comment("FontForge's timestamp, font source creation and modification dates")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(FFTMFormat)
- for name in names:
- value = getattr(self, name)
- if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
- value = timestampToString(value)
- writer.simpletag(name, value=value)
- writer.newline()
+ def toXML(self, writer, ttFont):
+ writer.comment(
+ "FontForge's timestamp, font source creation and modification dates"
+ )
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(FFTMFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
+ value = timestampToString(value)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
- value = timestampFromString(value)
- else:
- value = safeEval(value)
- setattr(self, name, value)
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
+ value = timestampFromString(value)
+ else:
+ value = safeEval(value)
+ setattr(self, name, value)
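
FFTM, like most tables in this diff, leans on fontTools.misc.sstruct, which wraps struct with named fields described by a small format string (the same helper the getformat/pack/unpack2 calls above come from). A toy round-trip sketch; the format here is made up purely for illustration:

    from fontTools.misc import sstruct

    toyFormat = """
        >
        version:  H      # uint16
        created:  Q      # uint64
    """

    class Toy(object):
        pass

    obj = Toy()
    obj.version = 1
    obj.created = 123456789
    data = sstruct.pack(toyFormat, obj)

    roundtrip, rest = sstruct.unpack2(toyFormat, data, Toy())
    print(sstruct.getformat(toyFormat)[1])              # ['version', 'created']
    print(roundtrip.version, roundtrip.created, rest)   # 1 123456789 b''
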
diff --git a/Lib/fontTools/ttLib/tables/F__e_a_t.py b/Lib/fontTools/ttLib/tables/F__e_a_t.py
index a444c11d..fbcd6ca6 100644
--- a/Lib/fontTools/ttLib/tables/F__e_a_t.py
+++ b/Lib/fontTools/ttLib/tables/F__e_a_t.py
@@ -5,10 +5,11 @@ from . import DefaultTable
from . import grUtils
import struct
-Feat_hdr_format='''
+Feat_hdr_format = """
>
version: 16.16F
-'''
+"""
+
class table_F__e_a_t(DefaultTable.DefaultTable):
"""The ``Feat`` table is used exclusively by the Graphite shaping engine
@@ -25,28 +26,30 @@ class table_F__e_a_t(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
(_, data) = sstruct.unpack2(Feat_hdr_format, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
- numFeats, = struct.unpack('>H', data[:2])
+ (numFeats,) = struct.unpack(">H", data[:2])
data = data[8:]
allfeats = []
maxsetting = 0
for i in range(numFeats):
if self.version >= 2.0:
- (fid, nums, _, offset, flags, lid) = struct.unpack(">LHHLHH",
- data[16*i:16*(i+1)])
+ (fid, nums, _, offset, flags, lid) = struct.unpack(
+ ">LHHLHH", data[16 * i : 16 * (i + 1)]
+ )
offset = int((offset - 12 - 16 * numFeats) / 4)
else:
- (fid, nums, offset, flags, lid) = struct.unpack(">HHLHH",
- data[12*i:12*(i+1)])
+ (fid, nums, offset, flags, lid) = struct.unpack(
+ ">HHLHH", data[12 * i : 12 * (i + 1)]
+ )
offset = int((offset - 12 - 12 * numFeats) / 4)
allfeats.append((fid, nums, offset, flags, lid))
maxsetting = max(maxsetting, offset + nums)
- data = data[16*numFeats:]
+ data = data[16 * numFeats :]
allsettings = []
for i in range(maxsetting):
if len(data) >= 4 * (i + 1):
- (val, lid) = struct.unpack(">HH", data[4*i:4*(i+1)])
+ (val, lid) = struct.unpack(">HH", data[4 * i : 4 * (i + 1)])
allsettings.append((val, lid))
- for i,f in enumerate(allfeats):
+ for i, f in enumerate(allfeats):
(fid, nums, offset, flags, lid) = f
fobj = Feature()
fobj.flags = flags
@@ -56,7 +59,8 @@ class table_F__e_a_t(DefaultTable.DefaultTable):
fobj.default = None
fobj.index = i
for i in range(offset, offset + nums):
- if i >= len(allsettings): continue
+ if i >= len(allsettings):
+ continue
(vid, vlid) = allsettings[i]
fobj.settings[vid] = vlid
if fobj.default is None:
@@ -66,54 +70,75 @@ class table_F__e_a_t(DefaultTable.DefaultTable):
fdat = b""
vdat = b""
offset = 0
- for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
+ for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
fnum = grUtils.tag2num(f)
if self.version >= 2.0:
- fdat += struct.pack(">LHHLHH", grUtils.tag2num(f), len(v.settings),
- 0, offset * 4 + 12 + 16 * len(self.features), v.flags, v.label)
- elif fnum > 65535: # self healing for alphabetic ids
+ fdat += struct.pack(
+ ">LHHLHH",
+ grUtils.tag2num(f),
+ len(v.settings),
+ 0,
+ offset * 4 + 12 + 16 * len(self.features),
+ v.flags,
+ v.label,
+ )
+ elif fnum > 65535: # self healing for alphabetic ids
self.version = 2.0
return self.compile(ttFont)
else:
- fdat += struct.pack(">HHLHH", grUtils.tag2num(f), len(v.settings),
- offset * 4 + 12 + 12 * len(self.features), v.flags, v.label)
- for s, l in sorted(v.settings.items(), key=lambda x:(-1, x[1]) if x[0] == v.default else x):
+ fdat += struct.pack(
+ ">HHLHH",
+ grUtils.tag2num(f),
+ len(v.settings),
+ offset * 4 + 12 + 12 * len(self.features),
+ v.flags,
+ v.label,
+ )
+ for s, l in sorted(
+ v.settings.items(), key=lambda x: (-1, x[1]) if x[0] == v.default else x
+ ):
vdat += struct.pack(">HH", s, l)
offset += len(v.settings)
hdr = sstruct.pack(Feat_hdr_format, self)
- return hdr + struct.pack('>HHL', len(self.features), 0, 0) + fdat + vdat
+ return hdr + struct.pack(">HHL", len(self.features), 0, 0) + fdat + vdat
def toXML(self, writer, ttFont):
- writer.simpletag('version', version=self.version)
+ writer.simpletag("version", version=self.version)
writer.newline()
- for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
- writer.begintag('feature', fid=f, label=v.label, flags=v.flags,
- default=(v.default if v.default else 0))
+ for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
+ writer.begintag(
+ "feature",
+ fid=f,
+ label=v.label,
+ flags=v.flags,
+ default=(v.default if v.default else 0),
+ )
writer.newline()
for s, l in sorted(v.settings.items()):
- writer.simpletag('setting', value=s, label=l)
+ writer.simpletag("setting", value=s, label=l)
writer.newline()
- writer.endtag('feature')
+ writer.endtag("feature")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.version = float(safeEval(attrs['version']))
- elif name == 'feature':
- fid = attrs['fid']
+ if name == "version":
+ self.version = float(safeEval(attrs["version"]))
+ elif name == "feature":
+ fid = attrs["fid"]
fobj = Feature()
- fobj.flags = int(safeEval(attrs['flags']))
- fobj.label = int(safeEval(attrs['label']))
- fobj.default = int(safeEval(attrs.get('default','0')))
+ fobj.flags = int(safeEval(attrs["flags"]))
+ fobj.label = int(safeEval(attrs["label"]))
+ fobj.default = int(safeEval(attrs.get("default", "0")))
fobj.index = len(self.features)
self.features[fid] = fobj
fobj.settings = {}
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, c = element
- if tag == 'setting':
- fobj.settings[int(safeEval(a['value']))] = int(safeEval(a['label']))
+ if tag == "setting":
+ fobj.settings[int(safeEval(a["value"]))] = int(safeEval(a["label"]))
+
class Feature(object):
pass
-
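
The decompile loop above strides through the feature records at 12 bytes per record for version 1.x and 16 bytes for version 2.0+, which is simply the packed size of the two unpack formats it switches between. A quick sanity-check sketch, pure struct arithmetic with no font data involved:

    import struct

    v1Record = ">HHLHH"     # featId, numSettings, settingsOffset, flags, labelId
    v2Record = ">LHHLHH"    # featId widened to 32 bits, plus a reserved uint16
    print(struct.calcsize(v1Record), struct.calcsize(v2Record))   # 12 16

Those sizes are also where the "- 12 - 12 * numFeats" and "- 12 - 16 * numFeats" corrections in the offset math come from: a 12-byte header (the 16.16F version plus the ">HHL" fields) followed by the packed feature records.
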
diff --git a/Lib/fontTools/ttLib/tables/G_D_E_F_.py b/Lib/fontTools/ttLib/tables/G_D_E_F_.py
index d4a57414..d8ae8b23 100644
--- a/Lib/fontTools/ttLib/tables/G_D_E_F_.py
+++ b/Lib/fontTools/ttLib/tables/G_D_E_F_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_G_D_E_F_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/G_M_A_P_.py b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
index 833890da..949ef842 100644
--- a/Lib/fontTools/ttLib/tables/G_M_A_P_.py
+++ b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
@@ -25,102 +25,117 @@ GMAPRecordFormat1 = """
class GMAPRecord(object):
- def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
- self.UV = uv
- self.cid = cid
- self.gid = gid
- self.ggid = ggid
- self.name = name
+ def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
+ self.UV = uv
+ self.cid = cid
+ self.gid = gid
+ self.ggid = ggid
+ self.name = name
- def toXML(self, writer, ttFont):
- writer.begintag("GMAPRecord")
- writer.newline()
- writer.simpletag("UV", value=self.UV)
- writer.newline()
- writer.simpletag("cid", value=self.cid)
- writer.newline()
- writer.simpletag("gid", value=self.gid)
- writer.newline()
- writer.simpletag("glyphletGid", value=self.gid)
- writer.newline()
- writer.simpletag("GlyphletName", value=self.name)
- writer.newline()
- writer.endtag("GMAPRecord")
- writer.newline()
+ def toXML(self, writer, ttFont):
+ writer.begintag("GMAPRecord")
+ writer.newline()
+ writer.simpletag("UV", value=self.UV)
+ writer.newline()
+ writer.simpletag("cid", value=self.cid)
+ writer.newline()
+ writer.simpletag("gid", value=self.gid)
+ writer.newline()
+ writer.simpletag("glyphletGid", value=self.gid)
+ writer.newline()
+ writer.simpletag("GlyphletName", value=self.name)
+ writer.newline()
+ writer.endtag("GMAPRecord")
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name == "GlyphletName":
- self.name = value
- else:
- setattr(self, name, safeEval(value))
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name == "GlyphletName":
+ self.name = value
+ else:
+ setattr(self, name, safeEval(value))
- def compile(self, ttFont):
- if self.UV is None:
- self.UV = 0
- nameLen = len(self.name)
- if nameLen < 32:
- self.name = self.name + "\0"*(32 - nameLen)
- data = sstruct.pack(GMAPRecordFormat1, self)
- return data
+ def compile(self, ttFont):
+ if self.UV is None:
+ self.UV = 0
+ nameLen = len(self.name)
+ if nameLen < 32:
+ self.name = self.name + "\0" * (32 - nameLen)
+ data = sstruct.pack(GMAPRecordFormat1, self)
+ return data
- def __repr__(self):
- return "GMAPRecord[ UV: " + str(self.UV) + ", cid: " + str(self.cid) + ", gid: " + str(self.gid) + ", ggid: " + str(self.ggid) + ", Glyphlet Name: " + str(self.name) + " ]"
+ def __repr__(self):
+ return (
+ "GMAPRecord[ UV: "
+ + str(self.UV)
+ + ", cid: "
+ + str(self.cid)
+ + ", gid: "
+ + str(self.gid)
+ + ", ggid: "
+ + str(self.ggid)
+ + ", Glyphlet Name: "
+ + str(self.name)
+ + " ]"
+ )
class table_G_M_A_P_(DefaultTable.DefaultTable):
+ dependencies = []
- dependencies = []
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
+ self.psFontName = tostr(newData[: self.fontNameLength])
+ assert (
+ self.recordsOffset % 4
+ ) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
+ newData = data[self.recordsOffset :]
+ self.gmapRecords = []
+ for i in range(self.recordsCount):
+ gmapRecord, newData = sstruct.unpack2(
+ GMAPRecordFormat1, newData, GMAPRecord()
+ )
+ gmapRecord.name = gmapRecord.name.strip("\0")
+ self.gmapRecords.append(gmapRecord)
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
- self.psFontName = tostr(newData[:self.fontNameLength])
- assert (self.recordsOffset % 4) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
- newData = data[self.recordsOffset:]
- self.gmapRecords = []
- for i in range (self.recordsCount):
- gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData, GMAPRecord())
- gmapRecord.name = gmapRecord.name.strip('\0')
- self.gmapRecords.append(gmapRecord)
+ def compile(self, ttFont):
+ self.recordsCount = len(self.gmapRecords)
+ self.fontNameLength = len(self.psFontName)
+ self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
+ data = sstruct.pack(GMAPFormat, self)
+ data = data + tobytes(self.psFontName)
+ data = data + b"\0" * (self.recordsOffset - len(data))
+ for record in self.gmapRecords:
+ data = data + record.compile(ttFont)
+ return data
- def compile(self, ttFont):
- self.recordsCount = len(self.gmapRecords)
- self.fontNameLength = len(self.psFontName)
- self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
- data = sstruct.pack(GMAPFormat, self)
- data = data + tobytes(self.psFontName)
- data = data + b"\0" * (self.recordsOffset - len(data))
- for record in self.gmapRecords:
- data = data + record.compile(ttFont)
- return data
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(GMAPFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ writer.simpletag("PSFontName", value=self.psFontName)
+ writer.newline()
+ for gmapRecord in self.gmapRecords:
+ gmapRecord.toXML(writer, ttFont)
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(GMAPFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- writer.simpletag("PSFontName", value=self.psFontName)
- writer.newline()
- for gmapRecord in self.gmapRecords:
- gmapRecord.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "GMAPRecord":
- if not hasattr(self, "gmapRecords"):
- self.gmapRecords = []
- gmapRecord = GMAPRecord()
- self.gmapRecords.append(gmapRecord)
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- gmapRecord.fromXML(name, attrs, content, ttFont)
- else:
- value = attrs["value"]
- if name == "PSFontName":
- self.psFontName = value
- else:
- setattr(self, name, safeEval(value))
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "GMAPRecord":
+ if not hasattr(self, "gmapRecords"):
+ self.gmapRecords = []
+ gmapRecord = GMAPRecord()
+ self.gmapRecords.append(gmapRecord)
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ gmapRecord.fromXML(name, attrs, content, ttFont)
+ else:
+ value = attrs["value"]
+ if name == "PSFontName":
+ self.psFontName = value
+ else:
+ setattr(self, name, safeEval(value))
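
Two layout details fall out of the GMAP compile() above: recordsOffset is rounded up to a 4-byte boundary past the fixed header fields (the "+ 12" in the computation) plus the PostScript name, and each GMAPRecord null-pads its glyphlet name to exactly 32 bytes. A tiny sketch of that arithmetic with made-up names:

    psFontName = "ExampleFont"                       # hypothetical PostScript name
    recordsOffset = 4 * (((len(psFontName) + 12) + 3) // 4)
    print(len(psFontName), recordsOffset)            # 11 24  (24 is 32-bit aligned)

    glyphletName = "glyphlet1"                       # hypothetical glyphlet name
    padded = glyphletName + "\0" * (32 - len(glyphletName))
    print(len(padded))                               # 32
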
diff --git a/Lib/fontTools/ttLib/tables/G_P_K_G_.py b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
index 4f469c02..eed34d92 100644
--- a/Lib/fontTools/ttLib/tables/G_P_K_G_.py
+++ b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
@@ -16,108 +16,111 @@ GPKGFormat = """
class table_G_P_K_G_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(GPKGFormat, data, self)
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(GPKGFormat, data, self)
+ GMAPoffsets = array.array("I")
+ endPos = (self.numGMAPs + 1) * 4
+ GMAPoffsets.frombytes(newData[:endPos])
+ if sys.byteorder != "big":
+ GMAPoffsets.byteswap()
+ self.GMAPs = []
+ for i in range(self.numGMAPs):
+ start = GMAPoffsets[i]
+ end = GMAPoffsets[i + 1]
+ self.GMAPs.append(data[start:end])
+ pos = endPos
+ endPos = pos + (self.numGlyplets + 1) * 4
+ glyphletOffsets = array.array("I")
+ glyphletOffsets.frombytes(newData[pos:endPos])
+ if sys.byteorder != "big":
+ glyphletOffsets.byteswap()
+ self.glyphlets = []
+ for i in range(self.numGlyplets):
+ start = glyphletOffsets[i]
+ end = glyphletOffsets[i + 1]
+ self.glyphlets.append(data[start:end])
- GMAPoffsets = array.array("I")
- endPos = (self.numGMAPs+1) * 4
- GMAPoffsets.frombytes(newData[:endPos])
- if sys.byteorder != "big": GMAPoffsets.byteswap()
- self.GMAPs = []
- for i in range(self.numGMAPs):
- start = GMAPoffsets[i]
- end = GMAPoffsets[i+1]
- self.GMAPs.append(data[start:end])
- pos = endPos
- endPos = pos + (self.numGlyplets + 1)*4
- glyphletOffsets = array.array("I")
- glyphletOffsets.frombytes(newData[pos:endPos])
- if sys.byteorder != "big": glyphletOffsets.byteswap()
- self.glyphlets = []
- for i in range(self.numGlyplets):
- start = glyphletOffsets[i]
- end = glyphletOffsets[i+1]
- self.glyphlets.append(data[start:end])
+ def compile(self, ttFont):
+ self.numGMAPs = len(self.GMAPs)
+ self.numGlyplets = len(self.glyphlets)
+ GMAPoffsets = [0] * (self.numGMAPs + 1)
+ glyphletOffsets = [0] * (self.numGlyplets + 1)
- def compile(self, ttFont):
- self.numGMAPs = len(self.GMAPs)
- self.numGlyplets = len(self.glyphlets)
- GMAPoffsets = [0]*(self.numGMAPs + 1)
- glyphletOffsets = [0]*(self.numGlyplets + 1)
+ dataList = [sstruct.pack(GPKGFormat, self)]
- dataList =[ sstruct.pack(GPKGFormat, self)]
+ pos = len(dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4
+ GMAPoffsets[0] = pos
+ for i in range(1, self.numGMAPs + 1):
+ pos += len(self.GMAPs[i - 1])
+ GMAPoffsets[i] = pos
+ gmapArray = array.array("I", GMAPoffsets)
+ if sys.byteorder != "big":
+ gmapArray.byteswap()
+ dataList.append(gmapArray.tobytes())
- pos = len(dataList[0]) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4
- GMAPoffsets[0] = pos
- for i in range(1, self.numGMAPs +1):
- pos += len(self.GMAPs[i-1])
- GMAPoffsets[i] = pos
- gmapArray = array.array("I", GMAPoffsets)
- if sys.byteorder != "big": gmapArray.byteswap()
- dataList.append(gmapArray.tobytes())
+ glyphletOffsets[0] = pos
+ for i in range(1, self.numGlyplets + 1):
+ pos += len(self.glyphlets[i - 1])
+ glyphletOffsets[i] = pos
+ glyphletArray = array.array("I", glyphletOffsets)
+ if sys.byteorder != "big":
+ glyphletArray.byteswap()
+ dataList.append(glyphletArray.tobytes())
+ dataList += self.GMAPs
+ dataList += self.glyphlets
+ data = bytesjoin(dataList)
+ return data
- glyphletOffsets[0] = pos
- for i in range(1, self.numGlyplets +1):
- pos += len(self.glyphlets[i-1])
- glyphletOffsets[i] = pos
- glyphletArray = array.array("I", glyphletOffsets)
- if sys.byteorder != "big": glyphletArray.byteswap()
- dataList.append(glyphletArray.tobytes())
- dataList += self.GMAPs
- dataList += self.glyphlets
- data = bytesjoin(dataList)
- return data
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(GPKGFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(GPKGFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
+ writer.begintag("GMAPs")
+ writer.newline()
+ for gmapData in self.GMAPs:
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(gmapData)
+ writer.endtag("hexdata")
+ writer.newline()
+ writer.endtag("GMAPs")
+ writer.newline()
- writer.begintag("GMAPs")
- writer.newline()
- for gmapData in self.GMAPs:
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(gmapData)
- writer.endtag("hexdata")
- writer.newline()
- writer.endtag("GMAPs")
- writer.newline()
+ writer.begintag("glyphlets")
+ writer.newline()
+ for glyphletData in self.glyphlets:
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(glyphletData)
+ writer.endtag("hexdata")
+ writer.newline()
+ writer.endtag("glyphlets")
+ writer.newline()
- writer.begintag("glyphlets")
- writer.newline()
- for glyphletData in self.glyphlets:
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(glyphletData)
- writer.endtag("hexdata")
- writer.newline()
- writer.endtag("glyphlets")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "GMAPs":
- if not hasattr(self, "GMAPs"):
- self.GMAPs = []
- for element in content:
- if isinstance(element, str):
- continue
- itemName, itemAttrs, itemContent = element
- if itemName == "hexdata":
- self.GMAPs.append(readHex(itemContent))
- elif name == "glyphlets":
- if not hasattr(self, "glyphlets"):
- self.glyphlets = []
- for element in content:
- if isinstance(element, str):
- continue
- itemName, itemAttrs, itemContent = element
- if itemName == "hexdata":
- self.glyphlets.append(readHex(itemContent))
- else:
- setattr(self, name, safeEval(attrs["value"]))
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "GMAPs":
+ if not hasattr(self, "GMAPs"):
+ self.GMAPs = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ itemName, itemAttrs, itemContent = element
+ if itemName == "hexdata":
+ self.GMAPs.append(readHex(itemContent))
+ elif name == "glyphlets":
+ if not hasattr(self, "glyphlets"):
+ self.glyphlets = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ itemName, itemAttrs, itemContent = element
+ if itemName == "hexdata":
+ self.glyphlets.append(readHex(itemContent))
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
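A minimal standalone sketch of the offset-array pattern used in the GPKG compile() above: cumulative offsets for a list of blobs are packed as big-endian uint32 values, byteswapping only on little-endian hosts. The helper name pack_offsets_be and its arguments are illustrative, not fontTools API, and it assumes array('I') is 32-bit, as the table code itself does.

import array
import struct
import sys

def pack_offsets_be(blobs, base):
    # Entry i is where blob i starts; the final entry marks the end of the
    # last blob, mirroring GMAPoffsets/glyphletOffsets above.
    offsets = [0] * (len(blobs) + 1)
    pos = base
    offsets[0] = pos
    for i in range(1, len(blobs) + 1):
        pos += len(blobs[i - 1])
        offsets[i] = pos
    packed = array.array("I", offsets)
    if sys.byteorder != "big":
        packed.byteswap()  # sfnt table data is always stored big-endian
    return packed.tobytes()

# Two 3-byte blobs placed after a 16-byte header:
assert pack_offsets_be([b"abc", b"def"], 16) == struct.pack(">3I", 16, 19, 22)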
diff --git a/Lib/fontTools/ttLib/tables/G_P_O_S_.py b/Lib/fontTools/ttLib/tables/G_P_O_S_.py
index 013c8209..ca8290ba 100644
--- a/Lib/fontTools/ttLib/tables/G_P_O_S_.py
+++ b/Lib/fontTools/ttLib/tables/G_P_O_S_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_G_P_O_S_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/G_S_U_B_.py b/Lib/fontTools/ttLib/tables/G_S_U_B_.py
index 44036490..bb8375a5 100644
--- a/Lib/fontTools/ttLib/tables/G_S_U_B_.py
+++ b/Lib/fontTools/ttLib/tables/G_S_U_B_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_G_S_U_B_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/G__l_a_t.py b/Lib/fontTools/ttLib/tables/G__l_a_t.py
index a4e8e38f..f1dfdaa0 100644
--- a/Lib/fontTools/ttLib/tables/G__l_a_t.py
+++ b/Lib/fontTools/ttLib/tables/G__l_a_t.py
@@ -1,6 +1,7 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
+
# from itertools import *
from functools import partial
from . import DefaultTable
@@ -51,16 +52,19 @@ Glat_format_3_subbox_entry = """
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""
-class _Object() :
+
+class _Object:
pass
-class _Dict(dict) :
+
+class _Dict(dict):
pass
+
class table_G__l_a_t(DefaultTable.DefaultTable):
- '''
+ """
Support Graphite Glat tables
- '''
+ """
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
@@ -70,31 +74,31 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
sstruct.unpack2(Glat_format_0, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
if self.version <= 1.9:
- decoder = partial(self.decompileAttributes12,fmt=Glat_format_1_entry)
- elif self.version <= 2.9:
- decoder = partial(self.decompileAttributes12,fmt=Glat_format_23_entry)
+ decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
+ elif self.version <= 2.9:
+ decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
elif self.version >= 3.0:
(data, self.scheme) = grUtils.decompress(data)
sstruct.unpack2(Glat_format_3, data, self)
self.hasOctaboxes = (self.compression & 1) == 1
decoder = self.decompileAttributes3
-
- gloc = ttFont['Gloc']
+
+ gloc = ttFont["Gloc"]
self.attributes = {}
count = 0
- for s,e in zip(gloc,gloc[1:]):
+ for s, e in zip(gloc, gloc[1:]):
self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
count += 1
-
+
def decompileAttributes12(self, data, fmt):
attributes = _Dict()
while len(data) > 3:
e, data = sstruct.unpack2(fmt, data, _Object())
- keys = range(e.attNum, e.attNum+e.num)
- if len(data) >= 2 * e.num :
- vals = struct.unpack_from(('>%dh' % e.num), data)
- attributes.update(zip(keys,vals))
- data = data[2*e.num:]
+ keys = range(e.attNum, e.attNum + e.num)
+ if len(data) >= 2 * e.num:
+ vals = struct.unpack_from((">%dh" % e.num), data)
+ attributes.update(zip(keys, vals))
+ data = data[2 * e.num :]
return attributes
def decompileAttributes3(self, data):
@@ -103,9 +107,10 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
numsub = bin(o.subboxBitmap).count("1")
o.subboxes = []
for b in range(numsub):
- if len(data) >= 8 :
- subbox, data = sstruct.unpack2(Glat_format_3_subbox_entry,
- data, _Object())
+ if len(data) >= 8:
+ subbox, data = sstruct.unpack2(
+ Glat_format_3_subbox_entry, data, _Object()
+ )
o.subboxes.append(subbox)
attrs = self.decompileAttributes12(data, Glat_format_23_entry)
if self.hasOctaboxes:
@@ -128,7 +133,7 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
glocs.append(len(data))
data += encoder(self.attributes[ttFont.getGlyphName(n)])
glocs.append(len(data))
- ttFont['Gloc'].set(glocs)
+ ttFont["Gloc"].set(glocs)
if self.version >= 3.0:
data = grUtils.compress(self.scheme, data)
@@ -137,82 +142,93 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
def compileAttributes12(self, attrs, fmt):
data = b""
for e in grUtils.entries(attrs):
- data += sstruct.pack(fmt, {'attNum' : e[0], 'num' : e[1]}) + \
- struct.pack(('>%dh' % len(e[2])), *e[2])
+ data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack(
+ (">%dh" % len(e[2])), *e[2]
+ )
return data
-
+
def compileAttributes3(self, attrs):
if self.hasOctaboxes:
o = attrs.octabox
data = sstruct.pack(Glat_format_3_octabox_metrics, o)
numsub = bin(o.subboxBitmap).count("1")
- for b in range(numsub) :
+ for b in range(numsub):
data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
else:
data = ""
return data + self.compileAttributes12(attrs, Glat_format_23_entry)
def toXML(self, writer, ttFont):
- writer.simpletag('version', version=self.version, compressionScheme=self.scheme)
+ writer.simpletag("version", version=self.version, compressionScheme=self.scheme)
writer.newline()
- for n, a in sorted(self.attributes.items(), key=lambda x:ttFont.getGlyphID(x[0])):
- writer.begintag('glyph', name=n)
+ for n, a in sorted(
+ self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])
+ ):
+ writer.begintag("glyph", name=n)
writer.newline()
- if hasattr(a, 'octabox'):
+ if hasattr(a, "octabox"):
o = a.octabox
- formatstring, names, fixes = sstruct.getformat(Glat_format_3_octabox_metrics)
+ formatstring, names, fixes = sstruct.getformat(
+ Glat_format_3_octabox_metrics
+ )
vals = {}
for k in names:
- if k == 'subboxBitmap': continue
- vals[k] = "{:.3f}%".format(getattr(o, k) * 100. / 255)
- vals['bitmap'] = "{:0X}".format(o.subboxBitmap)
- writer.begintag('octaboxes', **vals)
+ if k == "subboxBitmap":
+ continue
+ vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255)
+ vals["bitmap"] = "{:0X}".format(o.subboxBitmap)
+ writer.begintag("octaboxes", **vals)
writer.newline()
- formatstring, names, fixes = sstruct.getformat(Glat_format_3_subbox_entry)
+ formatstring, names, fixes = sstruct.getformat(
+ Glat_format_3_subbox_entry
+ )
for s in o.subboxes:
vals = {}
for k in names:
- vals[k] = "{:.3f}%".format(getattr(s, k) * 100. / 255)
- writer.simpletag('octabox', **vals)
+ vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255)
+ writer.simpletag("octabox", **vals)
writer.newline()
- writer.endtag('octaboxes')
+ writer.endtag("octaboxes")
writer.newline()
for k, v in sorted(a.items()):
- writer.simpletag('attribute', index=k, value=v)
+ writer.simpletag("attribute", index=k, value=v)
writer.newline()
- writer.endtag('glyph')
+ writer.endtag("glyph")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version' :
- self.version = float(safeEval(attrs['version']))
- self.scheme = int(safeEval(attrs['compressionScheme']))
- if name != 'glyph' : return
- if not hasattr(self, 'attributes'):
+ if name == "version":
+ self.version = float(safeEval(attrs["version"]))
+ self.scheme = int(safeEval(attrs["compressionScheme"]))
+ if name != "glyph":
+ return
+ if not hasattr(self, "attributes"):
self.attributes = {}
- gname = attrs['name']
+ gname = attrs["name"]
attributes = _Dict()
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag == 'attribute' :
- k = int(safeEval(attrs['index']))
- v = int(safeEval(attrs['value']))
- attributes[k]=v
- elif tag == 'octaboxes':
+ if tag == "attribute":
+ k = int(safeEval(attrs["index"]))
+ v = int(safeEval(attrs["value"]))
+ attributes[k] = v
+ elif tag == "octaboxes":
self.hasOctaboxes = True
o = _Object()
- o.subboxBitmap = int(attrs['bitmap'], 16)
+ o.subboxBitmap = int(attrs["bitmap"], 16)
o.subboxes = []
- del attrs['bitmap']
+ del attrs["bitmap"]
for k, v in attrs.items():
- setattr(o, k, int(float(v[:-1]) * 255. / 100. + 0.5))
+ setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
for element in subcontent:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
(tag, attrs, subcontent) = element
so = _Object()
for k, v in attrs.items():
- setattr(so, k, int(float(v[:-1]) * 255. / 100. + 0.5))
+ setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
o.subboxes.append(so)
attributes.octabox = o
self.attributes[gname] = attributes
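A small sketch of the octabox round trip used by the Glat toXML()/fromXML() above: each metric is stored as a byte (0 to 255) but serialized as a percentage with three decimals, then parsed back with half-up rounding, which keeps the round trip lossless. The helper names are mine, not part of the module.

def octabox_byte_to_xml(value):
    # byte (0-255) -> "xx.xxx%" string, as written by toXML() above
    return "{:.3f}%".format(value * 100.0 / 255)

def octabox_xml_to_byte(text):
    # "xx.xxx%" string -> byte, as parsed by fromXML() above
    return int(float(text[:-1]) * 255.0 / 100.0 + 0.5)

assert all(octabox_xml_to_byte(octabox_byte_to_xml(v)) == v for v in range(256))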
diff --git a/Lib/fontTools/ttLib/tables/G__l_o_c.py b/Lib/fontTools/ttLib/tables/G__l_o_c.py
index fa114a31..7973b9be 100644
--- a/Lib/fontTools/ttLib/tables/G__l_o_c.py
+++ b/Lib/fontTools/ttLib/tables/G__l_o_c.py
@@ -5,19 +5,21 @@ import array
import sys
-Gloc_header = '''
+Gloc_header = """
> # big endian
version: 16.16F # Table version
flags: H # bit 0: 1=long format, 0=short format
# bit 1: 1=attribute names, 0=no names
numAttribs: H # Number of attributes
-'''
+"""
+
class table_G__l_o_c(DefaultTable.DefaultTable):
"""
Support Graphite Gloc tables
"""
- dependencies = ['Glat']
+
+ dependencies = ["Glat"]
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
@@ -28,38 +30,49 @@ class table_G__l_o_c(DefaultTable.DefaultTable):
_, data = sstruct.unpack2(Gloc_header, data, self)
flags = self.flags
del self.flags
- self.locations = array.array('I' if flags & 1 else 'H')
- self.locations.frombytes(data[:len(data) - self.numAttribs * (flags & 2)])
- if sys.byteorder != "big": self.locations.byteswap()
- self.attribIds = array.array('H')
+ self.locations = array.array("I" if flags & 1 else "H")
+ self.locations.frombytes(data[: len(data) - self.numAttribs * (flags & 2)])
+ if sys.byteorder != "big":
+ self.locations.byteswap()
+ self.attribIds = array.array("H")
if flags & 2:
- self.attribIds.frombytes(data[-self.numAttribs * 2:])
- if sys.byteorder != "big": self.attribIds.byteswap()
+ self.attribIds.frombytes(data[-self.numAttribs * 2 :])
+ if sys.byteorder != "big":
+ self.attribIds.byteswap()
def compile(self, ttFont):
- data = sstruct.pack(Gloc_header, dict(version=1.0,
- flags=(bool(self.attribIds) << 1) + (self.locations.typecode == 'I'),
- numAttribs=self.numAttribs))
- if sys.byteorder != "big": self.locations.byteswap()
+ data = sstruct.pack(
+ Gloc_header,
+ dict(
+ version=1.0,
+ flags=(bool(self.attribIds) << 1) + (self.locations.typecode == "I"),
+ numAttribs=self.numAttribs,
+ ),
+ )
+ if sys.byteorder != "big":
+ self.locations.byteswap()
data += self.locations.tobytes()
- if sys.byteorder != "big": self.locations.byteswap()
+ if sys.byteorder != "big":
+ self.locations.byteswap()
if self.attribIds:
- if sys.byteorder != "big": self.attribIds.byteswap()
+ if sys.byteorder != "big":
+ self.attribIds.byteswap()
data += self.attribIds.tobytes()
- if sys.byteorder != "big": self.attribIds.byteswap()
+ if sys.byteorder != "big":
+ self.attribIds.byteswap()
return data
def set(self, locations):
long_format = max(locations) >= 65536
- self.locations = array.array('I' if long_format else 'H', locations)
+ self.locations = array.array("I" if long_format else "H", locations)
def toXML(self, writer, ttFont):
writer.simpletag("attributes", number=self.numAttribs)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'attributes':
- self.numAttribs = int(safeEval(attrs['number']))
+ if name == "attributes":
+ self.numAttribs = int(safeEval(attrs["number"]))
def __getitem__(self, index):
return self.locations[index]
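A quick sketch of the two format decisions in the Gloc code above: set() falls back to the 32-bit long format as soon as any offset overflows a uint16, and compile() records that choice (plus the presence of attribute names) in the flags field. The function names are mine.

def gloc_typecode(locations):
    # offsets >= 0x10000 do not fit in 'H', forcing the long format
    return "I" if max(locations) >= 65536 else "H"

def gloc_flags(typecode, has_attrib_names):
    # bit 0: 1=long format; bit 1: 1=attribute names present
    return (bool(has_attrib_names) << 1) + (typecode == "I")

assert gloc_typecode([0, 12, 65535]) == "H"
assert gloc_typecode([0, 12, 65536]) == "I"
assert gloc_flags("I", True) == 3 and gloc_flags("H", False) == 0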
diff --git a/Lib/fontTools/ttLib/tables/H_V_A_R_.py b/Lib/fontTools/ttLib/tables/H_V_A_R_.py
index 56992ad0..094aedae 100644
--- a/Lib/fontTools/ttLib/tables/H_V_A_R_.py
+++ b/Lib/fontTools/ttLib/tables/H_V_A_R_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_H_V_A_R_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/J_S_T_F_.py b/Lib/fontTools/ttLib/tables/J_S_T_F_.py
index ddf54055..111c7007 100644
--- a/Lib/fontTools/ttLib/tables/J_S_T_F_.py
+++ b/Lib/fontTools/ttLib/tables/J_S_T_F_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_J_S_T_F_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/L_T_S_H_.py b/Lib/fontTools/ttLib/tables/L_T_S_H_.py
index 94c2c22a..e0ab0d02 100644
--- a/Lib/fontTools/ttLib/tables/L_T_S_H_.py
+++ b/Lib/fontTools/ttLib/tables/L_T_S_H_.py
@@ -7,42 +7,42 @@ import array
# XXX gets through. They're looking into it, I hope to raise the standards
# XXX back to normal eventually.
-class table_L_T_S_H_(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- version, numGlyphs = struct.unpack(">HH", data[:4])
- data = data[4:]
- assert version == 0, "unknown version: %s" % version
- assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
- # ouch: the assertion is not true in Chicago!
- #assert numGlyphs == ttFont['maxp'].numGlyphs
- yPels = array.array("B")
- yPels.frombytes(data)
- self.yPels = {}
- for i in range(numGlyphs):
- self.yPels[ttFont.getGlyphName(i)] = yPels[i]
+class table_L_T_S_H_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ version, numGlyphs = struct.unpack(">HH", data[:4])
+ data = data[4:]
+ assert version == 0, "unknown version: %s" % version
+ assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
+ # ouch: the assertion is not true in Chicago!
+ # assert numGlyphs == ttFont['maxp'].numGlyphs
+ yPels = array.array("B")
+ yPels.frombytes(data)
+ self.yPels = {}
+ for i in range(numGlyphs):
+ self.yPels[ttFont.getGlyphName(i)] = yPels[i]
- def compile(self, ttFont):
- version = 0
- names = list(self.yPels.keys())
- numGlyphs = len(names)
- yPels = [0] * numGlyphs
- # ouch: the assertion is not true in Chicago!
- #assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
- for name in names:
- yPels[ttFont.getGlyphID(name)] = self.yPels[name]
- yPels = array.array("B", yPels)
- return struct.pack(">HH", version, numGlyphs) + yPels.tobytes()
+ def compile(self, ttFont):
+ version = 0
+ names = list(self.yPels.keys())
+ numGlyphs = len(names)
+ yPels = [0] * numGlyphs
+ # ouch: the assertion is not true in Chicago!
+ # assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
+ for name in names:
+ yPels[ttFont.getGlyphID(name)] = self.yPels[name]
+ yPels = array.array("B", yPels)
+ return struct.pack(">HH", version, numGlyphs) + yPels.tobytes()
- def toXML(self, writer, ttFont):
- names = sorted(self.yPels.keys())
- for name in names:
- writer.simpletag("yPel", name=name, value=self.yPels[name])
- writer.newline()
+ def toXML(self, writer, ttFont):
+ names = sorted(self.yPels.keys())
+ for name in names:
+ writer.simpletag("yPel", name=name, value=self.yPels[name])
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "yPels"):
- self.yPels = {}
- if name != "yPel":
- return # ignore unknown tags
- self.yPels[attrs["name"]] = safeEval(attrs["value"])
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "yPels"):
+ self.yPels = {}
+ if name != "yPel":
+ return # ignore unknown tags
+ self.yPels[attrs["name"]] = safeEval(attrs["value"])
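A compact sketch of the LTSH packing above: a version 0 header, the glyph count, then one yPel byte per glyph in glyph ID order. For simplicity the input here is keyed by glyph ID rather than by glyph name; the helper is illustrative, not the table's API.

import array
import struct

def pack_ltsh(yPels_by_id):
    numGlyphs = len(yPels_by_id)
    yPels = array.array("B", (yPels_by_id[i] for i in range(numGlyphs)))
    return struct.pack(">HH", 0, numGlyphs) + yPels.tobytes()

assert pack_ltsh({0: 1, 1: 40, 2: 255}) == b"\x00\x00\x00\x03\x01\x28\xff"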
diff --git a/Lib/fontTools/ttLib/tables/M_A_T_H_.py b/Lib/fontTools/ttLib/tables/M_A_T_H_.py
index d894c082..011426b5 100644
--- a/Lib/fontTools/ttLib/tables/M_A_T_H_.py
+++ b/Lib/fontTools/ttLib/tables/M_A_T_H_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_M_A_T_H_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/M_E_T_A_.py b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
index 990bfd2d..445aeb4d 100644
--- a/Lib/fontTools/ttLib/tables/M_E_T_A_.py
+++ b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
@@ -45,259 +45,301 @@ METAStringRecordFormat = """
# Strings shall be Unicode UTF-8 encoded, and null-terminated.
METALabelDict = {
- 0: "MojikumiX4051", # An integer in the range 1-20
- 1: "UNIUnifiedBaseChars",
- 2: "BaseFontName",
- 3: "Language",
- 4: "CreationDate",
- 5: "FoundryName",
- 6: "FoundryCopyright",
- 7: "OwnerURI",
- 8: "WritingScript",
- 10: "StrokeCount",
- 11: "IndexingRadical",
+ 0: "MojikumiX4051", # An integer in the range 1-20
+ 1: "UNIUnifiedBaseChars",
+ 2: "BaseFontName",
+ 3: "Language",
+ 4: "CreationDate",
+ 5: "FoundryName",
+ 6: "FoundryCopyright",
+ 7: "OwnerURI",
+ 8: "WritingScript",
+ 10: "StrokeCount",
+ 11: "IndexingRadical",
}
def getLabelString(labelID):
- try:
- label = METALabelDict[labelID]
- except KeyError:
- label = "Unknown label"
- return str(label)
+ try:
+ label = METALabelDict[labelID]
+ except KeyError:
+ label = "Unknown label"
+ return str(label)
class table_M_E_T_A_(DefaultTable.DefaultTable):
-
- dependencies = []
-
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
- self.glyphRecords = []
- for i in range(self.nMetaRecs):
- glyphRecord, newData = sstruct.unpack2(METAGlyphRecordFormat, newData, GlyphRecord())
- if self.metaFlags == 0:
- [glyphRecord.offset] = struct.unpack(">H", newData[:2])
- newData = newData[2:]
- elif self.metaFlags == 1:
- [glyphRecord.offset] = struct.unpack(">H", newData[:4])
- newData = newData[4:]
- else:
- assert 0, "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags)
- glyphRecord.stringRecs = []
- newData = data[glyphRecord.offset:]
- for j in range(glyphRecord.nMetaEntry):
- stringRec, newData = sstruct.unpack2(METAStringRecordFormat, newData, StringRecord())
- if self.metaFlags == 0:
- [stringRec.offset] = struct.unpack(">H", newData[:2])
- newData = newData[2:]
- else:
- [stringRec.offset] = struct.unpack(">H", newData[:4])
- newData = newData[4:]
- stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen]
- glyphRecord.stringRecs.append(stringRec)
- self.glyphRecords.append(glyphRecord)
-
- def compile(self, ttFont):
- offsetOK = 0
- self.nMetaRecs = len(self.glyphRecords)
- count = 0
- while (offsetOK != 1):
- count = count + 1
- if count > 4:
- pdb.set_trace()
- metaData = sstruct.pack(METAHeaderFormat, self)
- stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1))
- stringRecSize = (6 + 2*(self.metaFlags & 1))
- for glyphRec in self.glyphRecords:
- glyphRec.offset = stringRecsOffset
- if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
- self.metaFlags = self.metaFlags + 1
- offsetOK = -1
- break
- metaData = metaData + glyphRec.compile(self)
- stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize)
- # this will be the String Record offset for the next GlyphRecord.
- if offsetOK == -1:
- offsetOK = 0
- continue
-
- # metaData now contains the header and all of the GlyphRecords. Its length should be
- # the offset to the first StringRecord.
- stringOffset = stringRecsOffset
- for glyphRec in self.glyphRecords:
- assert (glyphRec.offset == len(metaData)), "Glyph record offset did not compile correctly! for rec:" + str(glyphRec)
- for stringRec in glyphRec.stringRecs:
- stringRec.offset = stringOffset
- if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
- self.metaFlags = self.metaFlags + 1
- offsetOK = -1
- break
- metaData = metaData + stringRec.compile(self)
- stringOffset = stringOffset + stringRec.stringLen
- if offsetOK == -1:
- offsetOK = 0
- continue
-
- if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
- self.metaFlags = self.metaFlags - 1
- continue
- else:
- offsetOK = 1
-
- # metaData now contains the header and all of the GlyphRecords and all of the String Records.
- # Its length should be the offset to the first string datum.
- for glyphRec in self.glyphRecords:
- for stringRec in glyphRec.stringRecs:
- assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! for string:" + str(stringRec.string)
- metaData = metaData + stringRec.string
-
- return metaData
-
- def toXML(self, writer, ttFont):
- writer.comment("Lengths and number of entries in this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- for glyphRec in self.glyphRecords:
- glyphRec.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "GlyphRecord":
- if not hasattr(self, "glyphRecords"):
- self.glyphRecords = []
- glyphRec = GlyphRecord()
- self.glyphRecords.append(glyphRec)
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- glyphRec.fromXML(name, attrs, content, ttFont)
- glyphRec.offset = -1
- glyphRec.nMetaEntry = len(glyphRec.stringRecs)
- else:
- setattr(self, name, safeEval(attrs["value"]))
+ dependencies = []
+
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
+ self.glyphRecords = []
+ for i in range(self.nMetaRecs):
+ glyphRecord, newData = sstruct.unpack2(
+ METAGlyphRecordFormat, newData, GlyphRecord()
+ )
+ if self.metaFlags == 0:
+ [glyphRecord.offset] = struct.unpack(">H", newData[:2])
+ newData = newData[2:]
+ elif self.metaFlags == 1:
+ [glyphRecord.offset] = struct.unpack(">H", newData[:4])
+ newData = newData[4:]
+ else:
+ assert 0, (
+ "The metaFlags field in the META table header has a value other than 0 or 1 :"
+ + str(self.metaFlags)
+ )
+ glyphRecord.stringRecs = []
+ newData = data[glyphRecord.offset :]
+ for j in range(glyphRecord.nMetaEntry):
+ stringRec, newData = sstruct.unpack2(
+ METAStringRecordFormat, newData, StringRecord()
+ )
+ if self.metaFlags == 0:
+ [stringRec.offset] = struct.unpack(">H", newData[:2])
+ newData = newData[2:]
+ else:
+ [stringRec.offset] = struct.unpack(">H", newData[:4])
+ newData = newData[4:]
+ stringRec.string = data[
+ stringRec.offset : stringRec.offset + stringRec.stringLen
+ ]
+ glyphRecord.stringRecs.append(stringRec)
+ self.glyphRecords.append(glyphRecord)
+
+ def compile(self, ttFont):
+ offsetOK = 0
+ self.nMetaRecs = len(self.glyphRecords)
+ count = 0
+ while offsetOK != 1:
+ count = count + 1
+ if count > 4:
+ pdb.set_trace()
+ metaData = sstruct.pack(METAHeaderFormat, self)
+ stringRecsOffset = len(metaData) + self.nMetaRecs * (
+ 6 + 2 * (self.metaFlags & 1)
+ )
+ stringRecSize = 6 + 2 * (self.metaFlags & 1)
+ for glyphRec in self.glyphRecords:
+ glyphRec.offset = stringRecsOffset
+ if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
+ self.metaFlags = self.metaFlags + 1
+ offsetOK = -1
+ break
+ metaData = metaData + glyphRec.compile(self)
+ stringRecsOffset = stringRecsOffset + (
+ glyphRec.nMetaEntry * stringRecSize
+ )
+ # this will be the String Record offset for the next GlyphRecord.
+ if offsetOK == -1:
+ offsetOK = 0
+ continue
+
+ # metaData now contains the header and all of the GlyphRecords. Its length should be
+ # the offset to the first StringRecord.
+ stringOffset = stringRecsOffset
+ for glyphRec in self.glyphRecords:
+ assert glyphRec.offset == len(
+ metaData
+ ), "Glyph record offset did not compile correctly! for rec:" + str(
+ glyphRec
+ )
+ for stringRec in glyphRec.stringRecs:
+ stringRec.offset = stringOffset
+ if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
+ self.metaFlags = self.metaFlags + 1
+ offsetOK = -1
+ break
+ metaData = metaData + stringRec.compile(self)
+ stringOffset = stringOffset + stringRec.stringLen
+ if offsetOK == -1:
+ offsetOK = 0
+ continue
+
+ if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
+ self.metaFlags = self.metaFlags - 1
+ continue
+ else:
+ offsetOK = 1
+
+ # metaData now contains the header and all of the GlyphRecords and all of the String Records.
+ # Its length should be the offset to the first string datum.
+ for glyphRec in self.glyphRecords:
+ for stringRec in glyphRec.stringRecs:
+ assert stringRec.offset == len(
+ metaData
+ ), "String offset did not compile correctly! for string:" + str(
+ stringRec.string
+ )
+ metaData = metaData + stringRec.string
+
+ return metaData
+
+ def toXML(self, writer, ttFont):
+ writer.comment(
+ "Lengths and number of entries in this table will be recalculated by the compiler"
+ )
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ for glyphRec in self.glyphRecords:
+ glyphRec.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "GlyphRecord":
+ if not hasattr(self, "glyphRecords"):
+ self.glyphRecords = []
+ glyphRec = GlyphRecord()
+ self.glyphRecords.append(glyphRec)
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ glyphRec.fromXML(name, attrs, content, ttFont)
+ glyphRec.offset = -1
+ glyphRec.nMetaEntry = len(glyphRec.stringRecs)
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
class GlyphRecord(object):
- def __init__(self):
- self.glyphID = -1
- self.nMetaEntry = -1
- self.offset = -1
- self.stringRecs = []
-
- def toXML(self, writer, ttFont):
- writer.begintag("GlyphRecord")
- writer.newline()
- writer.simpletag("glyphID", value=self.glyphID)
- writer.newline()
- writer.simpletag("nMetaEntry", value=self.nMetaEntry)
- writer.newline()
- for stringRec in self.stringRecs:
- stringRec.toXML(writer, ttFont)
- writer.endtag("GlyphRecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "StringRecord":
- stringRec = StringRecord()
- self.stringRecs.append(stringRec)
- for element in content:
- if isinstance(element, str):
- continue
- stringRec.fromXML(name, attrs, content, ttFont)
- stringRec.stringLen = len(stringRec.string)
- else:
- setattr(self, name, safeEval(attrs["value"]))
-
- def compile(self, parentTable):
- data = sstruct.pack(METAGlyphRecordFormat, self)
- if parentTable.metaFlags == 0:
- datum = struct.pack(">H", self.offset)
- elif parentTable.metaFlags == 1:
- datum = struct.pack(">L", self.offset)
- data = data + datum
- return data
-
- def __repr__(self):
- return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]"
+ def __init__(self):
+ self.glyphID = -1
+ self.nMetaEntry = -1
+ self.offset = -1
+ self.stringRecs = []
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("GlyphRecord")
+ writer.newline()
+ writer.simpletag("glyphID", value=self.glyphID)
+ writer.newline()
+ writer.simpletag("nMetaEntry", value=self.nMetaEntry)
+ writer.newline()
+ for stringRec in self.stringRecs:
+ stringRec.toXML(writer, ttFont)
+ writer.endtag("GlyphRecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "StringRecord":
+ stringRec = StringRecord()
+ self.stringRecs.append(stringRec)
+ for element in content:
+ if isinstance(element, str):
+ continue
+ stringRec.fromXML(name, attrs, content, ttFont)
+ stringRec.stringLen = len(stringRec.string)
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
+
+ def compile(self, parentTable):
+ data = sstruct.pack(METAGlyphRecordFormat, self)
+ if parentTable.metaFlags == 0:
+ datum = struct.pack(">H", self.offset)
+ elif parentTable.metaFlags == 1:
+ datum = struct.pack(">L", self.offset)
+ data = data + datum
+ return data
+
+ def __repr__(self):
+ return (
+ "GlyphRecord[ glyphID: "
+ + str(self.glyphID)
+ + ", nMetaEntry: "
+ + str(self.nMetaEntry)
+ + ", offset: "
+ + str(self.offset)
+ + " ]"
+ )
+
# XXX The following two functions are really broken around UTF-8 vs Unicode
+
def mapXMLToUTF8(string):
- uString = str()
- strLen = len(string)
- i = 0
- while i < strLen:
- prefixLen = 0
- if (string[i:i+3] == "&#x"):
- prefixLen = 3
- elif (string[i:i+7] == "&amp;#x"):
- prefixLen = 7
- if prefixLen:
- i = i+prefixLen
- j= i
- while string[i] != ";":
- i = i+1
- valStr = string[j:i]
-
- uString = uString + chr(eval('0x' + valStr))
- else:
- uString = uString + chr(byteord(string[i]))
- i = i +1
-
- return uString.encode('utf_8')
+ uString = str()
+ strLen = len(string)
+ i = 0
+ while i < strLen:
+ prefixLen = 0
+ if string[i : i + 3] == "&#x":
+ prefixLen = 3
+ elif string[i : i + 7] == "&amp;#x":
+ prefixLen = 7
+ if prefixLen:
+ i = i + prefixLen
+ j = i
+ while string[i] != ";":
+ i = i + 1
+ valStr = string[j:i]
+
+ uString = uString + chr(eval("0x" + valStr))
+ else:
+ uString = uString + chr(byteord(string[i]))
+ i = i + 1
+
+ return uString.encode("utf_8")
def mapUTF8toXML(string):
- uString = string.decode('utf_8')
- string = ""
- for uChar in uString:
- i = ord(uChar)
- if (i < 0x80) and (i > 0x1F):
- string = string + uChar
- else:
- string = string + "&#x" + hex(i)[2:] + ";"
- return string
+ uString = string.decode("utf_8")
+ string = ""
+ for uChar in uString:
+ i = ord(uChar)
+ if (i < 0x80) and (i > 0x1F):
+ string = string + uChar
+ else:
+ string = string + "&#x" + hex(i)[2:] + ";"
+ return string
class StringRecord(object):
-
- def toXML(self, writer, ttFont):
- writer.begintag("StringRecord")
- writer.newline()
- writer.simpletag("labelID", value=self.labelID)
- writer.comment(getLabelString(self.labelID))
- writer.newline()
- writer.newline()
- writer.simpletag("string", value=mapUTF8toXML(self.string))
- writer.newline()
- writer.endtag("StringRecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- value = attrs["value"]
- if name == "string":
- self.string = mapXMLToUTF8(value)
- else:
- setattr(self, name, safeEval(value))
-
- def compile(self, parentTable):
- data = sstruct.pack(METAStringRecordFormat, self)
- if parentTable.metaFlags == 0:
- datum = struct.pack(">H", self.offset)
- elif parentTable.metaFlags == 1:
- datum = struct.pack(">L", self.offset)
- data = data + datum
- return data
-
- def __repr__(self):
- return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \
- + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " +self.string + " ]"
+ def toXML(self, writer, ttFont):
+ writer.begintag("StringRecord")
+ writer.newline()
+ writer.simpletag("labelID", value=self.labelID)
+ writer.comment(getLabelString(self.labelID))
+ writer.newline()
+ writer.newline()
+ writer.simpletag("string", value=mapUTF8toXML(self.string))
+ writer.newline()
+ writer.endtag("StringRecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ value = attrs["value"]
+ if name == "string":
+ self.string = mapXMLToUTF8(value)
+ else:
+ setattr(self, name, safeEval(value))
+
+ def compile(self, parentTable):
+ data = sstruct.pack(METAStringRecordFormat, self)
+ if parentTable.metaFlags == 0:
+ datum = struct.pack(">H", self.offset)
+ elif parentTable.metaFlags == 1:
+ datum = struct.pack(">L", self.offset)
+ data = data + datum
+ return data
+
+ def __repr__(self):
+ return (
+ "StringRecord [ labelID: "
+ + str(self.labelID)
+ + " aka "
+ + getLabelString(self.labelID)
+ + ", offset: "
+ + str(self.offset)
+ + ", length: "
+ + str(self.stringLen)
+ + ", string: "
+ + self.string
+ + " ]"
+ )
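mapUTF8toXML() and mapXMLToUTF8() above escape everything outside printable ASCII as &#x...; character references; the module's own comment flags them as fragile. A minimal mirror of the encoding direction, shown without importing the module (the function name is mine):

def utf8_to_xml(data):
    out = ""
    for ch in data.decode("utf_8"):
        code = ord(ch)
        if 0x1F < code < 0x80:
            out += ch  # printable ASCII passes through unchanged
        else:
            out += "&#x" + hex(code)[2:] + ";"  # everything else is escaped
    return out

assert utf8_to_xml("Ab\u00e9".encode("utf_8")) == "Ab&#xe9;"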
diff --git a/Lib/fontTools/ttLib/tables/M_V_A_R_.py b/Lib/fontTools/ttLib/tables/M_V_A_R_.py
index 34ab20f7..8371795e 100644
--- a/Lib/fontTools/ttLib/tables/M_V_A_R_.py
+++ b/Lib/fontTools/ttLib/tables/M_V_A_R_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_M_V_A_R_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/O_S_2f_2.py b/Lib/fontTools/ttLib/tables/O_S_2f_2.py
index ba2e3961..7b403026 100644
--- a/Lib/fontTools/ttLib/tables/O_S_2f_2.py
+++ b/Lib/fontTools/ttLib/tables/O_S_2f_2.py
@@ -23,16 +23,23 @@ panoseFormat = """
bXHeight: B
"""
+
class Panose(object):
+ def __init__(self, **kwargs):
+ _, names, _ = sstruct.getformat(panoseFormat)
+ for name in names:
+ setattr(self, name, kwargs.pop(name, 0))
+ for k in kwargs:
+ raise TypeError(f"Panose() got an unexpected keyword argument {k!r}")
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(panoseFormat)
- for name in names:
- writer.simpletag(name, value=getattr(self, name))
- writer.newline()
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(panoseFormat)
+ for name in names:
+ writer.simpletag(name, value=getattr(self, name))
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- setattr(self, name, safeEval(attrs["value"]))
+ def fromXML(self, name, attrs, content, ttFont):
+ setattr(self, name, safeEval(attrs["value"]))
# 'sfnt' OS/2 and Windows Metrics table - 'OS/2'
@@ -71,23 +78,29 @@ OS2_format_0 = """
usWinDescent: H # Windows descender
"""
-OS2_format_1_addition = """
+OS2_format_1_addition = """
ulCodePageRange1: L
ulCodePageRange2: L
"""
-OS2_format_2_addition = OS2_format_1_addition + """
+OS2_format_2_addition = (
+ OS2_format_1_addition
+ + """
sxHeight: h
sCapHeight: h
usDefaultChar: H
usBreakChar: H
usMaxContext: H
"""
+)
-OS2_format_5_addition = OS2_format_2_addition + """
+OS2_format_5_addition = (
+ OS2_format_2_addition
+ + """
usLowerOpticalPointSize: H
usUpperOpticalPointSize: H
"""
+)
bigendian = " > # big endian\n"
@@ -101,438 +114,504 @@ OS2_format_5_addition = bigendian + OS2_format_5_addition
class table_O_S_2f_2(DefaultTable.DefaultTable):
- """the OS/2 table"""
-
- dependencies = ["head"]
-
- def decompile(self, data, ttFont):
- dummy, data = sstruct.unpack2(OS2_format_0, data, self)
-
- if self.version == 1:
- dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
- elif self.version in (2, 3, 4):
- dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
- elif self.version == 5:
- dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
- self.usLowerOpticalPointSize /= 20
- self.usUpperOpticalPointSize /= 20
- elif self.version != 0:
- from fontTools import ttLib
- raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
- if len(data):
- log.warning("too much 'OS/2' table data")
-
- self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())
-
- def compile(self, ttFont):
- self.updateFirstAndLastCharIndex(ttFont)
- panose = self.panose
- head = ttFont["head"]
- if (self.fsSelection & 1) and not (head.macStyle & 1<<1):
- log.warning("fsSelection bit 0 (italic) and "
- "head table macStyle bit 1 (italic) should match")
- if (self.fsSelection & 1<<5) and not (head.macStyle & 1):
- log.warning("fsSelection bit 5 (bold) and "
- "head table macStyle bit 0 (bold) should match")
- if (self.fsSelection & 1<<6) and (self.fsSelection & 1 + (1<<5)):
- log.warning("fsSelection bit 6 (regular) is set, "
- "bits 0 (italic) and 5 (bold) must be clear")
- if self.version < 4 and self.fsSelection & 0b1110000000:
- log.warning("fsSelection bits 7, 8 and 9 are only defined in "
- "OS/2 table version 4 and up: version %s", self.version)
- self.panose = sstruct.pack(panoseFormat, self.panose)
- if self.version == 0:
- data = sstruct.pack(OS2_format_0, self)
- elif self.version == 1:
- data = sstruct.pack(OS2_format_1, self)
- elif self.version in (2, 3, 4):
- data = sstruct.pack(OS2_format_2, self)
- elif self.version == 5:
- d = self.__dict__.copy()
- d['usLowerOpticalPointSize'] = round(self.usLowerOpticalPointSize * 20)
- d['usUpperOpticalPointSize'] = round(self.usUpperOpticalPointSize * 20)
- data = sstruct.pack(OS2_format_5, d)
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
- self.panose = panose
- return data
-
- def toXML(self, writer, ttFont):
- writer.comment(
- "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n"
- "will be recalculated by the compiler")
- writer.newline()
- if self.version == 1:
- format = OS2_format_1
- elif self.version in (2, 3, 4):
- format = OS2_format_2
- elif self.version == 5:
- format = OS2_format_5
- else:
- format = OS2_format_0
- formatstring, names, fixes = sstruct.getformat(format)
- for name in names:
- value = getattr(self, name)
- if name=="panose":
- writer.begintag("panose")
- writer.newline()
- value.toXML(writer, ttFont)
- writer.endtag("panose")
- elif name in ("ulUnicodeRange1", "ulUnicodeRange2",
- "ulUnicodeRange3", "ulUnicodeRange4",
- "ulCodePageRange1", "ulCodePageRange2"):
- writer.simpletag(name, value=num2binary(value))
- elif name in ("fsType", "fsSelection"):
- writer.simpletag(name, value=num2binary(value, 16))
- elif name == "achVendID":
- writer.simpletag(name, value=repr(value)[1:-1])
- else:
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "panose":
- self.panose = panose = Panose()
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- panose.fromXML(name, attrs, content, ttFont)
- elif name in ("ulUnicodeRange1", "ulUnicodeRange2",
- "ulUnicodeRange3", "ulUnicodeRange4",
- "ulCodePageRange1", "ulCodePageRange2",
- "fsType", "fsSelection"):
- setattr(self, name, binary2num(attrs["value"]))
- elif name == "achVendID":
- setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
- else:
- setattr(self, name, safeEval(attrs["value"]))
-
- def updateFirstAndLastCharIndex(self, ttFont):
- if 'cmap' not in ttFont:
- return
- codes = set()
- for table in getattr(ttFont['cmap'], 'tables', []):
- if table.isUnicode():
- codes.update(table.cmap.keys())
- if codes:
- minCode = min(codes)
- maxCode = max(codes)
- # USHORT cannot hold codepoints greater than 0xFFFF
- self.usFirstCharIndex = min(0xFFFF, minCode)
- self.usLastCharIndex = min(0xFFFF, maxCode)
-
- # misspelled attributes kept for legacy reasons
-
- @property
- def usMaxContex(self):
- return self.usMaxContext
-
- @usMaxContex.setter
- def usMaxContex(self, value):
- self.usMaxContext = value
-
- @property
- def fsFirstCharIndex(self):
- return self.usFirstCharIndex
-
- @fsFirstCharIndex.setter
- def fsFirstCharIndex(self, value):
- self.usFirstCharIndex = value
-
- @property
- def fsLastCharIndex(self):
- return self.usLastCharIndex
-
- @fsLastCharIndex.setter
- def fsLastCharIndex(self, value):
- self.usLastCharIndex = value
-
- def getUnicodeRanges(self):
- """ Return the set of 'ulUnicodeRange*' bits currently enabled. """
- bits = set()
- ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2
- ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4
- for i in range(32):
- if ul1 & (1 << i):
- bits.add(i)
- if ul2 & (1 << i):
- bits.add(i + 32)
- if ul3 & (1 << i):
- bits.add(i + 64)
- if ul4 & (1 << i):
- bits.add(i + 96)
- return bits
-
- def setUnicodeRanges(self, bits):
- """ Set the 'ulUnicodeRange*' fields to the specified 'bits'. """
- ul1, ul2, ul3, ul4 = 0, 0, 0, 0
- for bit in bits:
- if 0 <= bit < 32:
- ul1 |= (1 << bit)
- elif 32 <= bit < 64:
- ul2 |= (1 << (bit - 32))
- elif 64 <= bit < 96:
- ul3 |= (1 << (bit - 64))
- elif 96 <= bit < 123:
- ul4 |= (1 << (bit - 96))
- else:
- raise ValueError('expected 0 <= int <= 122, found: %r' % bit)
- self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2
- self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4
-
- def recalcUnicodeRanges(self, ttFont, pruneOnly=False):
- """ Intersect the codepoints in the font's Unicode cmap subtables with
- the Unicode block ranges defined in the OpenType specification (v1.7),
- and set the respective 'ulUnicodeRange*' bits if there is at least ONE
- intersection.
- If 'pruneOnly' is True, only clear unused bits with NO intersection.
- """
- unicodes = set()
- for table in ttFont['cmap'].tables:
- if table.isUnicode():
- unicodes.update(table.cmap.keys())
- if pruneOnly:
- empty = intersectUnicodeRanges(unicodes, inverse=True)
- bits = self.getUnicodeRanges() - empty
- else:
- bits = intersectUnicodeRanges(unicodes)
- self.setUnicodeRanges(bits)
- return bits
-
- def recalcAvgCharWidth(self, ttFont):
- """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table.
-
- Set it to 0 in the unlikely event the 'hmtx' table is not found.
- """
- avg_width = 0
- hmtx = ttFont.get("hmtx")
- if hmtx:
- widths = [m[0] for m in hmtx.metrics.values() if m[0] > 0]
- avg_width = otRound(sum(widths) / len(widths))
- self.xAvgCharWidth = avg_width
- return avg_width
+ """the OS/2 table"""
+
+ dependencies = ["head"]
+
+ def decompile(self, data, ttFont):
+ dummy, data = sstruct.unpack2(OS2_format_0, data, self)
+
+ if self.version == 1:
+ dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
+ elif self.version in (2, 3, 4):
+ dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
+ elif self.version == 5:
+ dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
+ self.usLowerOpticalPointSize /= 20
+ self.usUpperOpticalPointSize /= 20
+ elif self.version != 0:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError(
+ "unknown format for OS/2 table: version %s" % self.version
+ )
+ if len(data):
+ log.warning("too much 'OS/2' table data")
+
+ self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())
+
+ def compile(self, ttFont):
+ self.updateFirstAndLastCharIndex(ttFont)
+ panose = self.panose
+ head = ttFont["head"]
+ if (self.fsSelection & 1) and not (head.macStyle & 1 << 1):
+ log.warning(
+ "fsSelection bit 0 (italic) and "
+ "head table macStyle bit 1 (italic) should match"
+ )
+ if (self.fsSelection & 1 << 5) and not (head.macStyle & 1):
+ log.warning(
+ "fsSelection bit 5 (bold) and "
+ "head table macStyle bit 0 (bold) should match"
+ )
+ if (self.fsSelection & 1 << 6) and (self.fsSelection & 1 + (1 << 5)):
+ log.warning(
+ "fsSelection bit 6 (regular) is set, "
+ "bits 0 (italic) and 5 (bold) must be clear"
+ )
+ if self.version < 4 and self.fsSelection & 0b1110000000:
+ log.warning(
+ "fsSelection bits 7, 8 and 9 are only defined in "
+ "OS/2 table version 4 and up: version %s",
+ self.version,
+ )
+ self.panose = sstruct.pack(panoseFormat, self.panose)
+ if self.version == 0:
+ data = sstruct.pack(OS2_format_0, self)
+ elif self.version == 1:
+ data = sstruct.pack(OS2_format_1, self)
+ elif self.version in (2, 3, 4):
+ data = sstruct.pack(OS2_format_2, self)
+ elif self.version == 5:
+ d = self.__dict__.copy()
+ d["usLowerOpticalPointSize"] = round(self.usLowerOpticalPointSize * 20)
+ d["usUpperOpticalPointSize"] = round(self.usUpperOpticalPointSize * 20)
+ data = sstruct.pack(OS2_format_5, d)
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError(
+ "unknown format for OS/2 table: version %s" % self.version
+ )
+ self.panose = panose
+ return data
+
+ def toXML(self, writer, ttFont):
+ writer.comment(
+ "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n"
+ "will be recalculated by the compiler"
+ )
+ writer.newline()
+ if self.version == 1:
+ format = OS2_format_1
+ elif self.version in (2, 3, 4):
+ format = OS2_format_2
+ elif self.version == 5:
+ format = OS2_format_5
+ else:
+ format = OS2_format_0
+ formatstring, names, fixes = sstruct.getformat(format)
+ for name in names:
+ value = getattr(self, name)
+ if name == "panose":
+ writer.begintag("panose")
+ writer.newline()
+ value.toXML(writer, ttFont)
+ writer.endtag("panose")
+ elif name in (
+ "ulUnicodeRange1",
+ "ulUnicodeRange2",
+ "ulUnicodeRange3",
+ "ulUnicodeRange4",
+ "ulCodePageRange1",
+ "ulCodePageRange2",
+ ):
+ writer.simpletag(name, value=num2binary(value))
+ elif name in ("fsType", "fsSelection"):
+ writer.simpletag(name, value=num2binary(value, 16))
+ elif name == "achVendID":
+ writer.simpletag(name, value=repr(value)[1:-1])
+ else:
+ writer.simpletag(name, value=value)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "panose":
+ self.panose = panose = Panose()
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ panose.fromXML(name, attrs, content, ttFont)
+ elif name in (
+ "ulUnicodeRange1",
+ "ulUnicodeRange2",
+ "ulUnicodeRange3",
+ "ulUnicodeRange4",
+ "ulCodePageRange1",
+ "ulCodePageRange2",
+ "fsType",
+ "fsSelection",
+ ):
+ setattr(self, name, binary2num(attrs["value"]))
+ elif name == "achVendID":
+ setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
+
+ def updateFirstAndLastCharIndex(self, ttFont):
+ if "cmap" not in ttFont:
+ return
+ codes = set()
+ for table in getattr(ttFont["cmap"], "tables", []):
+ if table.isUnicode():
+ codes.update(table.cmap.keys())
+ if codes:
+ minCode = min(codes)
+ maxCode = max(codes)
+ # USHORT cannot hold codepoints greater than 0xFFFF
+ self.usFirstCharIndex = min(0xFFFF, minCode)
+ self.usLastCharIndex = min(0xFFFF, maxCode)
+
+ # misspelled attributes kept for legacy reasons
+
+ @property
+ def usMaxContex(self):
+ return self.usMaxContext
+
+ @usMaxContex.setter
+ def usMaxContex(self, value):
+ self.usMaxContext = value
+
+ @property
+ def fsFirstCharIndex(self):
+ return self.usFirstCharIndex
+
+ @fsFirstCharIndex.setter
+ def fsFirstCharIndex(self, value):
+ self.usFirstCharIndex = value
+
+ @property
+ def fsLastCharIndex(self):
+ return self.usLastCharIndex
+
+ @fsLastCharIndex.setter
+ def fsLastCharIndex(self, value):
+ self.usLastCharIndex = value
+
+ def getUnicodeRanges(self):
+ """Return the set of 'ulUnicodeRange*' bits currently enabled."""
+ bits = set()
+ ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2
+ ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4
+ for i in range(32):
+ if ul1 & (1 << i):
+ bits.add(i)
+ if ul2 & (1 << i):
+ bits.add(i + 32)
+ if ul3 & (1 << i):
+ bits.add(i + 64)
+ if ul4 & (1 << i):
+ bits.add(i + 96)
+ return bits
+
+ def setUnicodeRanges(self, bits):
+ """Set the 'ulUnicodeRange*' fields to the specified 'bits'."""
+ ul1, ul2, ul3, ul4 = 0, 0, 0, 0
+ for bit in bits:
+ if 0 <= bit < 32:
+ ul1 |= 1 << bit
+ elif 32 <= bit < 64:
+ ul2 |= 1 << (bit - 32)
+ elif 64 <= bit < 96:
+ ul3 |= 1 << (bit - 64)
+ elif 96 <= bit < 123:
+ ul4 |= 1 << (bit - 96)
+ else:
+ raise ValueError("expected 0 <= int <= 122, found: %r" % bit)
+ self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2
+ self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4
+
+ def recalcUnicodeRanges(self, ttFont, pruneOnly=False):
+ """Intersect the codepoints in the font's Unicode cmap subtables with
+ the Unicode block ranges defined in the OpenType specification (v1.7),
+ and set the respective 'ulUnicodeRange*' bits if there is at least ONE
+ intersection.
+ If 'pruneOnly' is True, only clear unused bits with NO intersection.
+ """
+ unicodes = set()
+ for table in ttFont["cmap"].tables:
+ if table.isUnicode():
+ unicodes.update(table.cmap.keys())
+ if pruneOnly:
+ empty = intersectUnicodeRanges(unicodes, inverse=True)
+ bits = self.getUnicodeRanges() - empty
+ else:
+ bits = intersectUnicodeRanges(unicodes)
+ self.setUnicodeRanges(bits)
+ return bits
+
+ def recalcAvgCharWidth(self, ttFont):
+ """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table.
+
+ Set it to 0 in the unlikely event the 'hmtx' table is not found.
+ """
+ avg_width = 0
+ hmtx = ttFont.get("hmtx")
+ if hmtx is not None:
+ widths = [width for width, _ in hmtx.metrics.values() if width > 0]
+ if widths:
+ avg_width = otRound(sum(widths) / len(widths))
+ self.xAvgCharWidth = avg_width
+ return avg_width
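getUnicodeRanges() and setUnicodeRanges() above spread bit numbers 0 to 122 across the four 32-bit ulUnicodeRange words: bit n lands in word n // 32 at position n % 32. A short sketch of that packing (the helper name is mine):

def split_unicode_range_bits(bits):
    words = [0, 0, 0, 0]  # ulUnicodeRange1..4
    for bit in bits:
        if not 0 <= bit <= 122:
            raise ValueError("expected 0 <= int <= 122, found: %r" % bit)
        words[bit // 32] |= 1 << (bit % 32)
    return words

# Bit 0 is 'Basic Latin'; bit 57 ('Non-Plane 0 *') ends up in ulUnicodeRange2.
assert split_unicode_range_bits({0, 57}) == [1, 1 << 25, 0, 0]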
# Unicode ranges data from the OpenType OS/2 table specification v1.7
OS2_UNICODE_RANGES = (
- (('Basic Latin', (0x0000, 0x007F)),),
- (('Latin-1 Supplement', (0x0080, 0x00FF)),),
- (('Latin Extended-A', (0x0100, 0x017F)),),
- (('Latin Extended-B', (0x0180, 0x024F)),),
- (('IPA Extensions', (0x0250, 0x02AF)),
- ('Phonetic Extensions', (0x1D00, 0x1D7F)),
- ('Phonetic Extensions Supplement', (0x1D80, 0x1DBF))),
- (('Spacing Modifier Letters', (0x02B0, 0x02FF)),
- ('Modifier Tone Letters', (0xA700, 0xA71F))),
- (('Combining Diacritical Marks', (0x0300, 0x036F)),
- ('Combining Diacritical Marks Supplement', (0x1DC0, 0x1DFF))),
- (('Greek and Coptic', (0x0370, 0x03FF)),),
- (('Coptic', (0x2C80, 0x2CFF)),),
- (('Cyrillic', (0x0400, 0x04FF)),
- ('Cyrillic Supplement', (0x0500, 0x052F)),
- ('Cyrillic Extended-A', (0x2DE0, 0x2DFF)),
- ('Cyrillic Extended-B', (0xA640, 0xA69F))),
- (('Armenian', (0x0530, 0x058F)),),
- (('Hebrew', (0x0590, 0x05FF)),),
- (('Vai', (0xA500, 0xA63F)),),
- (('Arabic', (0x0600, 0x06FF)),
- ('Arabic Supplement', (0x0750, 0x077F))),
- (('NKo', (0x07C0, 0x07FF)),),
- (('Devanagari', (0x0900, 0x097F)),),
- (('Bengali', (0x0980, 0x09FF)),),
- (('Gurmukhi', (0x0A00, 0x0A7F)),),
- (('Gujarati', (0x0A80, 0x0AFF)),),
- (('Oriya', (0x0B00, 0x0B7F)),),
- (('Tamil', (0x0B80, 0x0BFF)),),
- (('Telugu', (0x0C00, 0x0C7F)),),
- (('Kannada', (0x0C80, 0x0CFF)),),
- (('Malayalam', (0x0D00, 0x0D7F)),),
- (('Thai', (0x0E00, 0x0E7F)),),
- (('Lao', (0x0E80, 0x0EFF)),),
- (('Georgian', (0x10A0, 0x10FF)),
- ('Georgian Supplement', (0x2D00, 0x2D2F))),
- (('Balinese', (0x1B00, 0x1B7F)),),
- (('Hangul Jamo', (0x1100, 0x11FF)),),
- (('Latin Extended Additional', (0x1E00, 0x1EFF)),
- ('Latin Extended-C', (0x2C60, 0x2C7F)),
- ('Latin Extended-D', (0xA720, 0xA7FF))),
- (('Greek Extended', (0x1F00, 0x1FFF)),),
- (('General Punctuation', (0x2000, 0x206F)),
- ('Supplemental Punctuation', (0x2E00, 0x2E7F))),
- (('Superscripts And Subscripts', (0x2070, 0x209F)),),
- (('Currency Symbols', (0x20A0, 0x20CF)),),
- (('Combining Diacritical Marks For Symbols', (0x20D0, 0x20FF)),),
- (('Letterlike Symbols', (0x2100, 0x214F)),),
- (('Number Forms', (0x2150, 0x218F)),),
- (('Arrows', (0x2190, 0x21FF)),
- ('Supplemental Arrows-A', (0x27F0, 0x27FF)),
- ('Supplemental Arrows-B', (0x2900, 0x297F)),
- ('Miscellaneous Symbols and Arrows', (0x2B00, 0x2BFF))),
- (('Mathematical Operators', (0x2200, 0x22FF)),
- ('Supplemental Mathematical Operators', (0x2A00, 0x2AFF)),
- ('Miscellaneous Mathematical Symbols-A', (0x27C0, 0x27EF)),
- ('Miscellaneous Mathematical Symbols-B', (0x2980, 0x29FF))),
- (('Miscellaneous Technical', (0x2300, 0x23FF)),),
- (('Control Pictures', (0x2400, 0x243F)),),
- (('Optical Character Recognition', (0x2440, 0x245F)),),
- (('Enclosed Alphanumerics', (0x2460, 0x24FF)),),
- (('Box Drawing', (0x2500, 0x257F)),),
- (('Block Elements', (0x2580, 0x259F)),),
- (('Geometric Shapes', (0x25A0, 0x25FF)),),
- (('Miscellaneous Symbols', (0x2600, 0x26FF)),),
- (('Dingbats', (0x2700, 0x27BF)),),
- (('CJK Symbols And Punctuation', (0x3000, 0x303F)),),
- (('Hiragana', (0x3040, 0x309F)),),
- (('Katakana', (0x30A0, 0x30FF)),
- ('Katakana Phonetic Extensions', (0x31F0, 0x31FF))),
- (('Bopomofo', (0x3100, 0x312F)),
- ('Bopomofo Extended', (0x31A0, 0x31BF))),
- (('Hangul Compatibility Jamo', (0x3130, 0x318F)),),
- (('Phags-pa', (0xA840, 0xA87F)),),
- (('Enclosed CJK Letters And Months', (0x3200, 0x32FF)),),
- (('CJK Compatibility', (0x3300, 0x33FF)),),
- (('Hangul Syllables', (0xAC00, 0xD7AF)),),
- (('Non-Plane 0 *', (0xD800, 0xDFFF)),),
- (('Phoenician', (0x10900, 0x1091F)),),
- (('CJK Unified Ideographs', (0x4E00, 0x9FFF)),
- ('CJK Radicals Supplement', (0x2E80, 0x2EFF)),
- ('Kangxi Radicals', (0x2F00, 0x2FDF)),
- ('Ideographic Description Characters', (0x2FF0, 0x2FFF)),
- ('CJK Unified Ideographs Extension A', (0x3400, 0x4DBF)),
- ('CJK Unified Ideographs Extension B', (0x20000, 0x2A6DF)),
- ('Kanbun', (0x3190, 0x319F))),
- (('Private Use Area (plane 0)', (0xE000, 0xF8FF)),),
- (('CJK Strokes', (0x31C0, 0x31EF)),
- ('CJK Compatibility Ideographs', (0xF900, 0xFAFF)),
- ('CJK Compatibility Ideographs Supplement', (0x2F800, 0x2FA1F))),
- (('Alphabetic Presentation Forms', (0xFB00, 0xFB4F)),),
- (('Arabic Presentation Forms-A', (0xFB50, 0xFDFF)),),
- (('Combining Half Marks', (0xFE20, 0xFE2F)),),
- (('Vertical Forms', (0xFE10, 0xFE1F)),
- ('CJK Compatibility Forms', (0xFE30, 0xFE4F))),
- (('Small Form Variants', (0xFE50, 0xFE6F)),),
- (('Arabic Presentation Forms-B', (0xFE70, 0xFEFF)),),
- (('Halfwidth And Fullwidth Forms', (0xFF00, 0xFFEF)),),
- (('Specials', (0xFFF0, 0xFFFF)),),
- (('Tibetan', (0x0F00, 0x0FFF)),),
- (('Syriac', (0x0700, 0x074F)),),
- (('Thaana', (0x0780, 0x07BF)),),
- (('Sinhala', (0x0D80, 0x0DFF)),),
- (('Myanmar', (0x1000, 0x109F)),),
- (('Ethiopic', (0x1200, 0x137F)),
- ('Ethiopic Supplement', (0x1380, 0x139F)),
- ('Ethiopic Extended', (0x2D80, 0x2DDF))),
- (('Cherokee', (0x13A0, 0x13FF)),),
- (('Unified Canadian Aboriginal Syllabics', (0x1400, 0x167F)),),
- (('Ogham', (0x1680, 0x169F)),),
- (('Runic', (0x16A0, 0x16FF)),),
- (('Khmer', (0x1780, 0x17FF)),
- ('Khmer Symbols', (0x19E0, 0x19FF))),
- (('Mongolian', (0x1800, 0x18AF)),),
- (('Braille Patterns', (0x2800, 0x28FF)),),
- (('Yi Syllables', (0xA000, 0xA48F)),
- ('Yi Radicals', (0xA490, 0xA4CF))),
- (('Tagalog', (0x1700, 0x171F)),
- ('Hanunoo', (0x1720, 0x173F)),
- ('Buhid', (0x1740, 0x175F)),
- ('Tagbanwa', (0x1760, 0x177F))),
- (('Old Italic', (0x10300, 0x1032F)),),
- (('Gothic', (0x10330, 0x1034F)),),
- (('Deseret', (0x10400, 0x1044F)),),
- (('Byzantine Musical Symbols', (0x1D000, 0x1D0FF)),
- ('Musical Symbols', (0x1D100, 0x1D1FF)),
- ('Ancient Greek Musical Notation', (0x1D200, 0x1D24F))),
- (('Mathematical Alphanumeric Symbols', (0x1D400, 0x1D7FF)),),
- (('Private Use (plane 15)', (0xF0000, 0xFFFFD)),
- ('Private Use (plane 16)', (0x100000, 0x10FFFD))),
- (('Variation Selectors', (0xFE00, 0xFE0F)),
- ('Variation Selectors Supplement', (0xE0100, 0xE01EF))),
- (('Tags', (0xE0000, 0xE007F)),),
- (('Limbu', (0x1900, 0x194F)),),
- (('Tai Le', (0x1950, 0x197F)),),
- (('New Tai Lue', (0x1980, 0x19DF)),),
- (('Buginese', (0x1A00, 0x1A1F)),),
- (('Glagolitic', (0x2C00, 0x2C5F)),),
- (('Tifinagh', (0x2D30, 0x2D7F)),),
- (('Yijing Hexagram Symbols', (0x4DC0, 0x4DFF)),),
- (('Syloti Nagri', (0xA800, 0xA82F)),),
- (('Linear B Syllabary', (0x10000, 0x1007F)),
- ('Linear B Ideograms', (0x10080, 0x100FF)),
- ('Aegean Numbers', (0x10100, 0x1013F))),
- (('Ancient Greek Numbers', (0x10140, 0x1018F)),),
- (('Ugaritic', (0x10380, 0x1039F)),),
- (('Old Persian', (0x103A0, 0x103DF)),),
- (('Shavian', (0x10450, 0x1047F)),),
- (('Osmanya', (0x10480, 0x104AF)),),
- (('Cypriot Syllabary', (0x10800, 0x1083F)),),
- (('Kharoshthi', (0x10A00, 0x10A5F)),),
- (('Tai Xuan Jing Symbols', (0x1D300, 0x1D35F)),),
- (('Cuneiform', (0x12000, 0x123FF)),
- ('Cuneiform Numbers and Punctuation', (0x12400, 0x1247F))),
- (('Counting Rod Numerals', (0x1D360, 0x1D37F)),),
- (('Sundanese', (0x1B80, 0x1BBF)),),
- (('Lepcha', (0x1C00, 0x1C4F)),),
- (('Ol Chiki', (0x1C50, 0x1C7F)),),
- (('Saurashtra', (0xA880, 0xA8DF)),),
- (('Kayah Li', (0xA900, 0xA92F)),),
- (('Rejang', (0xA930, 0xA95F)),),
- (('Cham', (0xAA00, 0xAA5F)),),
- (('Ancient Symbols', (0x10190, 0x101CF)),),
- (('Phaistos Disc', (0x101D0, 0x101FF)),),
- (('Carian', (0x102A0, 0x102DF)),
- ('Lycian', (0x10280, 0x1029F)),
- ('Lydian', (0x10920, 0x1093F))),
- (('Domino Tiles', (0x1F030, 0x1F09F)),
- ('Mahjong Tiles', (0x1F000, 0x1F02F))),
+ (("Basic Latin", (0x0000, 0x007F)),),
+ (("Latin-1 Supplement", (0x0080, 0x00FF)),),
+ (("Latin Extended-A", (0x0100, 0x017F)),),
+ (("Latin Extended-B", (0x0180, 0x024F)),),
+ (
+ ("IPA Extensions", (0x0250, 0x02AF)),
+ ("Phonetic Extensions", (0x1D00, 0x1D7F)),
+ ("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)),
+ ),
+ (
+ ("Spacing Modifier Letters", (0x02B0, 0x02FF)),
+ ("Modifier Tone Letters", (0xA700, 0xA71F)),
+ ),
+ (
+ ("Combining Diacritical Marks", (0x0300, 0x036F)),
+ ("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)),
+ ),
+ (("Greek and Coptic", (0x0370, 0x03FF)),),
+ (("Coptic", (0x2C80, 0x2CFF)),),
+ (
+ ("Cyrillic", (0x0400, 0x04FF)),
+ ("Cyrillic Supplement", (0x0500, 0x052F)),
+ ("Cyrillic Extended-A", (0x2DE0, 0x2DFF)),
+ ("Cyrillic Extended-B", (0xA640, 0xA69F)),
+ ),
+ (("Armenian", (0x0530, 0x058F)),),
+ (("Hebrew", (0x0590, 0x05FF)),),
+ (("Vai", (0xA500, 0xA63F)),),
+ (("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))),
+ (("NKo", (0x07C0, 0x07FF)),),
+ (("Devanagari", (0x0900, 0x097F)),),
+ (("Bengali", (0x0980, 0x09FF)),),
+ (("Gurmukhi", (0x0A00, 0x0A7F)),),
+ (("Gujarati", (0x0A80, 0x0AFF)),),
+ (("Oriya", (0x0B00, 0x0B7F)),),
+ (("Tamil", (0x0B80, 0x0BFF)),),
+ (("Telugu", (0x0C00, 0x0C7F)),),
+ (("Kannada", (0x0C80, 0x0CFF)),),
+ (("Malayalam", (0x0D00, 0x0D7F)),),
+ (("Thai", (0x0E00, 0x0E7F)),),
+ (("Lao", (0x0E80, 0x0EFF)),),
+ (("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))),
+ (("Balinese", (0x1B00, 0x1B7F)),),
+ (("Hangul Jamo", (0x1100, 0x11FF)),),
+ (
+ ("Latin Extended Additional", (0x1E00, 0x1EFF)),
+ ("Latin Extended-C", (0x2C60, 0x2C7F)),
+ ("Latin Extended-D", (0xA720, 0xA7FF)),
+ ),
+ (("Greek Extended", (0x1F00, 0x1FFF)),),
+ (
+ ("General Punctuation", (0x2000, 0x206F)),
+ ("Supplemental Punctuation", (0x2E00, 0x2E7F)),
+ ),
+ (("Superscripts And Subscripts", (0x2070, 0x209F)),),
+ (("Currency Symbols", (0x20A0, 0x20CF)),),
+ (("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),),
+ (("Letterlike Symbols", (0x2100, 0x214F)),),
+ (("Number Forms", (0x2150, 0x218F)),),
+ (
+ ("Arrows", (0x2190, 0x21FF)),
+ ("Supplemental Arrows-A", (0x27F0, 0x27FF)),
+ ("Supplemental Arrows-B", (0x2900, 0x297F)),
+ ("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)),
+ ),
+ (
+ ("Mathematical Operators", (0x2200, 0x22FF)),
+ ("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)),
+ ("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)),
+ ("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)),
+ ),
+ (("Miscellaneous Technical", (0x2300, 0x23FF)),),
+ (("Control Pictures", (0x2400, 0x243F)),),
+ (("Optical Character Recognition", (0x2440, 0x245F)),),
+ (("Enclosed Alphanumerics", (0x2460, 0x24FF)),),
+ (("Box Drawing", (0x2500, 0x257F)),),
+ (("Block Elements", (0x2580, 0x259F)),),
+ (("Geometric Shapes", (0x25A0, 0x25FF)),),
+ (("Miscellaneous Symbols", (0x2600, 0x26FF)),),
+ (("Dingbats", (0x2700, 0x27BF)),),
+ (("CJK Symbols And Punctuation", (0x3000, 0x303F)),),
+ (("Hiragana", (0x3040, 0x309F)),),
+ (
+ ("Katakana", (0x30A0, 0x30FF)),
+ ("Katakana Phonetic Extensions", (0x31F0, 0x31FF)),
+ ),
+ (("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))),
+ (("Hangul Compatibility Jamo", (0x3130, 0x318F)),),
+ (("Phags-pa", (0xA840, 0xA87F)),),
+ (("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),),
+ (("CJK Compatibility", (0x3300, 0x33FF)),),
+ (("Hangul Syllables", (0xAC00, 0xD7AF)),),
+ (("Non-Plane 0 *", (0xD800, 0xDFFF)),),
+ (("Phoenician", (0x10900, 0x1091F)),),
+ (
+ ("CJK Unified Ideographs", (0x4E00, 0x9FFF)),
+ ("CJK Radicals Supplement", (0x2E80, 0x2EFF)),
+ ("Kangxi Radicals", (0x2F00, 0x2FDF)),
+ ("Ideographic Description Characters", (0x2FF0, 0x2FFF)),
+ ("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)),
+ ("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)),
+ ("Kanbun", (0x3190, 0x319F)),
+ ),
+ (("Private Use Area (plane 0)", (0xE000, 0xF8FF)),),
+ (
+ ("CJK Strokes", (0x31C0, 0x31EF)),
+ ("CJK Compatibility Ideographs", (0xF900, 0xFAFF)),
+ ("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)),
+ ),
+ (("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),),
+ (("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),),
+ (("Combining Half Marks", (0xFE20, 0xFE2F)),),
+ (
+ ("Vertical Forms", (0xFE10, 0xFE1F)),
+ ("CJK Compatibility Forms", (0xFE30, 0xFE4F)),
+ ),
+ (("Small Form Variants", (0xFE50, 0xFE6F)),),
+ (("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),),
+ (("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),),
+ (("Specials", (0xFFF0, 0xFFFF)),),
+ (("Tibetan", (0x0F00, 0x0FFF)),),
+ (("Syriac", (0x0700, 0x074F)),),
+ (("Thaana", (0x0780, 0x07BF)),),
+ (("Sinhala", (0x0D80, 0x0DFF)),),
+ (("Myanmar", (0x1000, 0x109F)),),
+ (
+ ("Ethiopic", (0x1200, 0x137F)),
+ ("Ethiopic Supplement", (0x1380, 0x139F)),
+ ("Ethiopic Extended", (0x2D80, 0x2DDF)),
+ ),
+ (("Cherokee", (0x13A0, 0x13FF)),),
+ (("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),),
+ (("Ogham", (0x1680, 0x169F)),),
+ (("Runic", (0x16A0, 0x16FF)),),
+ (("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))),
+ (("Mongolian", (0x1800, 0x18AF)),),
+ (("Braille Patterns", (0x2800, 0x28FF)),),
+ (("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))),
+ (
+ ("Tagalog", (0x1700, 0x171F)),
+ ("Hanunoo", (0x1720, 0x173F)),
+ ("Buhid", (0x1740, 0x175F)),
+ ("Tagbanwa", (0x1760, 0x177F)),
+ ),
+ (("Old Italic", (0x10300, 0x1032F)),),
+ (("Gothic", (0x10330, 0x1034F)),),
+ (("Deseret", (0x10400, 0x1044F)),),
+ (
+ ("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)),
+ ("Musical Symbols", (0x1D100, 0x1D1FF)),
+ ("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)),
+ ),
+ (("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),),
+ (
+ ("Private Use (plane 15)", (0xF0000, 0xFFFFD)),
+ ("Private Use (plane 16)", (0x100000, 0x10FFFD)),
+ ),
+ (
+ ("Variation Selectors", (0xFE00, 0xFE0F)),
+ ("Variation Selectors Supplement", (0xE0100, 0xE01EF)),
+ ),
+ (("Tags", (0xE0000, 0xE007F)),),
+ (("Limbu", (0x1900, 0x194F)),),
+ (("Tai Le", (0x1950, 0x197F)),),
+ (("New Tai Lue", (0x1980, 0x19DF)),),
+ (("Buginese", (0x1A00, 0x1A1F)),),
+ (("Glagolitic", (0x2C00, 0x2C5F)),),
+ (("Tifinagh", (0x2D30, 0x2D7F)),),
+ (("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),),
+ (("Syloti Nagri", (0xA800, 0xA82F)),),
+ (
+ ("Linear B Syllabary", (0x10000, 0x1007F)),
+ ("Linear B Ideograms", (0x10080, 0x100FF)),
+ ("Aegean Numbers", (0x10100, 0x1013F)),
+ ),
+ (("Ancient Greek Numbers", (0x10140, 0x1018F)),),
+ (("Ugaritic", (0x10380, 0x1039F)),),
+ (("Old Persian", (0x103A0, 0x103DF)),),
+ (("Shavian", (0x10450, 0x1047F)),),
+ (("Osmanya", (0x10480, 0x104AF)),),
+ (("Cypriot Syllabary", (0x10800, 0x1083F)),),
+ (("Kharoshthi", (0x10A00, 0x10A5F)),),
+ (("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),),
+ (
+ ("Cuneiform", (0x12000, 0x123FF)),
+ ("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)),
+ ),
+ (("Counting Rod Numerals", (0x1D360, 0x1D37F)),),
+ (("Sundanese", (0x1B80, 0x1BBF)),),
+ (("Lepcha", (0x1C00, 0x1C4F)),),
+ (("Ol Chiki", (0x1C50, 0x1C7F)),),
+ (("Saurashtra", (0xA880, 0xA8DF)),),
+ (("Kayah Li", (0xA900, 0xA92F)),),
+ (("Rejang", (0xA930, 0xA95F)),),
+ (("Cham", (0xAA00, 0xAA5F)),),
+ (("Ancient Symbols", (0x10190, 0x101CF)),),
+ (("Phaistos Disc", (0x101D0, 0x101FF)),),
+ (
+ ("Carian", (0x102A0, 0x102DF)),
+ ("Lycian", (0x10280, 0x1029F)),
+ ("Lydian", (0x10920, 0x1093F)),
+ ),
+ (("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))),
)
_unicodeStarts = []
_unicodeValues = [None]
+
def _getUnicodeRanges():
- # build the ranges of codepoints for each unicode range bit, and cache result
- if not _unicodeStarts:
- unicodeRanges = [
- (start, (stop, bit)) for bit, blocks in enumerate(OS2_UNICODE_RANGES)
- for _, (start, stop) in blocks]
- for start, (stop, bit) in sorted(unicodeRanges):
- _unicodeStarts.append(start)
- _unicodeValues.append((stop, bit))
- return _unicodeStarts, _unicodeValues
+ # build the ranges of codepoints for each unicode range bit, and cache result
+ if not _unicodeStarts:
+ unicodeRanges = [
+ (start, (stop, bit))
+ for bit, blocks in enumerate(OS2_UNICODE_RANGES)
+ for _, (start, stop) in blocks
+ ]
+ for start, (stop, bit) in sorted(unicodeRanges):
+ _unicodeStarts.append(start)
+ _unicodeValues.append((stop, bit))
+ return _unicodeStarts, _unicodeValues
def intersectUnicodeRanges(unicodes, inverse=False):
- """ Intersect a sequence of (int) Unicode codepoints with the Unicode block
- ranges defined in the OpenType specification v1.7, and return the set of
- 'ulUnicodeRanges' bits for which there is at least ONE intersection.
- If 'inverse' is True, return the the bits for which there is NO intersection.
-
- >>> intersectUnicodeRanges([0x0410]) == {9}
- True
- >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
- True
- >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
- ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
- True
- """
- unicodes = set(unicodes)
- unicodestarts, unicodevalues = _getUnicodeRanges()
- bits = set()
- for code in unicodes:
- stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)]
- if code <= stop:
- bits.add(bit)
- # The spec says that bit 57 ("Non Plane 0") implies that there's
- # at least one codepoint beyond the BMP; so I also include all
- # the non-BMP codepoints here
- if any(0x10000 <= code < 0x110000 for code in unicodes):
- bits.add(57)
- return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits
+ """Intersect a sequence of (int) Unicode codepoints with the Unicode block
+ ranges defined in the OpenType specification v1.7, and return the set of
+ 'ulUnicodeRanges' bits for which there is at least ONE intersection.
+ If 'inverse' is True, return the bits for which there is NO intersection.
+
+ >>> intersectUnicodeRanges([0x0410]) == {9}
+ True
+ >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
+ True
+ >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
+ ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
+ True
+ """
+ unicodes = set(unicodes)
+ unicodestarts, unicodevalues = _getUnicodeRanges()
+ bits = set()
+ for code in unicodes:
+ stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)]
+ if code <= stop:
+ bits.add(bit)
+ # The spec says that bit 57 ("Non Plane 0") implies that there's
+ # at least one codepoint beyond the BMP; so I also include all
+ # the non-BMP codepoints here
+ if any(0x10000 <= code < 0x110000 for code in unicodes):
+ bits.add(57)
+ return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits
if __name__ == "__main__":
- import doctest, sys
- sys.exit(doctest.testmod().failed)
+ import doctest, sys
+
+ sys.exit(doctest.testmod().failed)
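
The intersectUnicodeRanges() helper above maps a set of codepoints to OS/2 'ulUnicodeRanges' bits via the cached, bisect-based block lookup built by _getUnicodeRanges(). A minimal usage sketch follows; TTFont, getBestCmap() and the fontTools.ttLib.tables.O_S_2f_2 import path are real fontTools names, while the helper function and the packing into the four 32-bit ulUnicodeRange fields are illustrative assumptions rather than the library's own recalculation code:

    from fontTools.ttLib import TTFont
    from fontTools.ttLib.tables.O_S_2f_2 import intersectUnicodeRanges

    def unicode_range_fields(font_path):
        # hypothetical helper: pack the range bits into four 32-bit OS/2 fields
        font = TTFont(font_path)
        codepoints = font.getBestCmap().keys()  # best cmap: {codepoint: glyph name}
        fields = [0, 0, 0, 0]
        for bit in intersectUnicodeRanges(codepoints):
            fields[bit // 32] |= 1 << (bit % 32)
        return fields  # ulUnicodeRange1 .. ulUnicodeRange4

Passing inverse=True instead yields the complement, i.e. the bits whose blocks the codepoint set does not intersect at all.
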
diff --git a/Lib/fontTools/ttLib/tables/S_I_N_G_.py b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
index 73246df4..4522c06c 100644
--- a/Lib/fontTools/ttLib/tables/S_I_N_G_.py
+++ b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
@@ -20,74 +20,73 @@ SINGFormat = """
class table_S_I_N_G_(DefaultTable.DefaultTable):
+ dependencies = []
- dependencies = []
+ def decompile(self, data, ttFont):
+ dummy, rest = sstruct.unpack2(SINGFormat, data, self)
+ self.uniqueName = self.decompileUniqueName(self.uniqueName)
+ self.nameLength = byteord(self.nameLength)
+ assert len(rest) == self.nameLength
+ self.baseGlyphName = tostr(rest)
- def decompile(self, data, ttFont):
- dummy, rest = sstruct.unpack2(SINGFormat, data, self)
- self.uniqueName = self.decompileUniqueName(self.uniqueName)
- self.nameLength = byteord(self.nameLength)
- assert len(rest) == self.nameLength
- self.baseGlyphName = tostr(rest)
+ rawMETAMD5 = self.METAMD5
+ self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
+ for char in rawMETAMD5[1:]:
+ self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
+ self.METAMD5 = self.METAMD5 + "]"
- rawMETAMD5 = self.METAMD5
- self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
- for char in rawMETAMD5[1:]:
- self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
- self.METAMD5 = self.METAMD5 + "]"
+ def decompileUniqueName(self, data):
+ name = ""
+ for char in data:
+ val = byteord(char)
+ if val == 0:
+ break
+ if (val > 31) or (val < 128):
+ name += chr(val)
+ else:
+ octString = oct(val)
+ if len(octString) > 3:
+ octString = octString[1:] # chop off that leading zero.
+ elif len(octString) < 3:
+ octString.zfill(3)
+ name += "\\" + octString
+ return name
- def decompileUniqueName(self, data):
- name = ""
- for char in data:
- val = byteord(char)
- if val == 0:
- break
- if (val > 31) or (val < 128):
- name += chr(val)
- else:
- octString = oct(val)
- if len(octString) > 3:
- octString = octString[1:] # chop off that leading zero.
- elif len(octString) < 3:
- octString.zfill(3)
- name += "\\" + octString
- return name
+ def compile(self, ttFont):
+ d = self.__dict__.copy()
+ d["nameLength"] = bytechr(len(self.baseGlyphName))
+ d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
+ METAMD5List = eval(self.METAMD5)
+ d["METAMD5"] = b""
+ for val in METAMD5List:
+ d["METAMD5"] += bytechr(val)
+ assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table"
+ data = sstruct.pack(SINGFormat, d)
+ data = data + tobytes(self.baseGlyphName)
+ return data
- def compile(self, ttFont):
- d = self.__dict__.copy()
- d["nameLength"] = bytechr(len(self.baseGlyphName))
- d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
- METAMD5List = eval(self.METAMD5)
- d["METAMD5"] = b""
- for val in METAMD5List:
- d["METAMD5"] += bytechr(val)
- assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table"
- data = sstruct.pack(SINGFormat, d)
- data = data + tobytes(self.baseGlyphName)
- return data
+ def compilecompileUniqueName(self, name, length):
+ nameLen = len(name)
+ if length <= nameLen:
+ name = name[: length - 1] + "\000"
+ else:
+ name += (nameLen - length) * "\000"
+ return name
- def compilecompileUniqueName(self, name, length):
- nameLen = len(name)
- if length <= nameLen:
- name = name[:length-1] + "\000"
- else:
- name += (nameLen - length) * "\000"
- return name
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(SINGFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ writer.simpletag("baseGlyphName", value=self.baseGlyphName)
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(SINGFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- writer.simpletag("baseGlyphName", value=self.baseGlyphName)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
- setattr(self, name, value)
- else:
- setattr(self, name, safeEval(value))
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
+ setattr(self, name, value)
+ else:
+ setattr(self, name, safeEval(value))
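
In the SING code above, decompile() renders the raw 16-byte METAMD5 digest as a bracketed list of hex byte values, and compile() eval()s that string back into bytes before packing. A small standalone sketch of that encoding, assuming nothing beyond the Python standard library (the helper names are illustrative, and ast.literal_eval stands in for the table's eval() call):

    import ast

    def metamd5_to_text(digest: bytes) -> str:
        # same shape decompile() produces, e.g. "[0x0, 0x1, ..., 0xf]"
        return "[" + ", ".join(hex(b) for b in digest) + "]"

    def text_to_metamd5(text: str) -> bytes:
        # safer equivalent of the eval() used in compile()
        return bytes(ast.literal_eval(text))

    digest = bytes(range(16))
    assert text_to_metamd5(metamd5_to_text(digest)) == digest
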
diff --git a/Lib/fontTools/ttLib/tables/S_V_G_.py b/Lib/fontTools/ttLib/tables/S_V_G_.py
index 49e98d03..ebc2befd 100644
--- a/Lib/fontTools/ttLib/tables/S_V_G_.py
+++ b/Lib/fontTools/ttLib/tables/S_V_G_.py
@@ -50,148 +50,166 @@ doc_index_entry_format_0 = """
doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
-
class table_S_V_G_(DefaultTable.DefaultTable):
-
- def decompile(self, data, ttFont):
- self.docList = []
- # Version 0 is the standardized version of the table; and current.
- # https://www.microsoft.com/typography/otspec/svg.htm
- sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
- if self.version != 0:
- log.warning(
- "Unknown SVG table version '%s'. Decompiling as version 0.", self.version)
- # read in SVG Documents Index
- # data starts with the first entry of the entry list.
- pos = subTableStart = self.offsetToSVGDocIndex
- self.numEntries = struct.unpack(">H", data[pos:pos+2])[0]
- pos += 2
- if self.numEntries > 0:
- data2 = data[pos:]
- entries = []
- for i in range(self.numEntries):
- docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry())
- entries.append(docIndexEntry)
-
- for entry in entries:
- start = entry.svgDocOffset + subTableStart
- end = start + entry.svgDocLength
- doc = data[start:end]
- compressed = False
- if doc.startswith(b"\x1f\x8b"):
- import gzip
- bytesIO = BytesIO(doc)
- with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
- doc = gunzipper.read()
- del bytesIO
- compressed = True
- doc = tostr(doc, "utf_8")
- self.docList.append(
- SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed)
- )
-
- def compile(self, ttFont):
- version = 0
- offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header.
- # get SGVDoc info.
- docList = []
- entryList = []
- numEntries = len(self.docList)
- datum = struct.pack(">H",numEntries)
- entryList.append(datum)
- curOffset = len(datum) + doc_index_entry_format_0Size*numEntries
- seenDocs = {}
- allCompressed = getattr(self, "compressed", False)
- for i, doc in enumerate(self.docList):
- if isinstance(doc, (list, tuple)):
- doc = SVGDocument(*doc)
- self.docList[i] = doc
- docBytes = tobytes(doc.data, encoding="utf_8")
- if (allCompressed or doc.compressed) and not docBytes.startswith(b"\x1f\x8b"):
- import gzip
- bytesIO = BytesIO()
- # mtime=0 strips the useless timestamp and makes gzip output reproducible;
- # equivalent to `gzip -n`
- with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper:
- gzipper.write(docBytes)
- gzipped = bytesIO.getvalue()
- if len(gzipped) < len(docBytes):
- docBytes = gzipped
- del gzipped, bytesIO
- docLength = len(docBytes)
- if docBytes in seenDocs:
- docOffset = seenDocs[docBytes]
- else:
- docOffset = curOffset
- curOffset += docLength
- seenDocs[docBytes] = docOffset
- docList.append(docBytes)
- entry = struct.pack(">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength)
- entryList.append(entry)
- entryList.extend(docList)
- svgDocData = bytesjoin(entryList)
-
- reserved = 0
- header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
- data = [header, svgDocData]
- data = bytesjoin(data)
- return data
-
- def toXML(self, writer, ttFont):
- for i, doc in enumerate(self.docList):
- if isinstance(doc, (list, tuple)):
- doc = SVGDocument(*doc)
- self.docList[i] = doc
- attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID}
- if doc.compressed:
- attrs["compressed"] = 1
- writer.begintag("svgDoc", **attrs)
- writer.newline()
- writer.writecdata(doc.data)
- writer.newline()
- writer.endtag("svgDoc")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "svgDoc":
- if not hasattr(self, "docList"):
- self.docList = []
- doc = strjoin(content)
- doc = doc.strip()
- startGID = int(attrs["startGlyphID"])
- endGID = int(attrs["endGlyphID"])
- compressed = bool(safeEval(attrs.get("compressed", "0")))
- self.docList.append(SVGDocument(doc, startGID, endGID, compressed))
- else:
- log.warning("Unknown %s %s", name, content)
+ def decompile(self, data, ttFont):
+ self.docList = []
+ # Version 0 is the standardized version of the table, and the current one.
+ # https://www.microsoft.com/typography/otspec/svg.htm
+ sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
+ if self.version != 0:
+ log.warning(
+ "Unknown SVG table version '%s'. Decompiling as version 0.",
+ self.version,
+ )
+ # read in SVG Documents Index
+ # data starts with the first entry of the entry list.
+ pos = subTableStart = self.offsetToSVGDocIndex
+ self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0]
+ pos += 2
+ if self.numEntries > 0:
+ data2 = data[pos:]
+ entries = []
+ for i in range(self.numEntries):
+ record_data = data2[
+ i
+ * doc_index_entry_format_0Size : (i + 1)
+ * doc_index_entry_format_0Size
+ ]
+ docIndexEntry = sstruct.unpack(
+ doc_index_entry_format_0, record_data, DocumentIndexEntry()
+ )
+ entries.append(docIndexEntry)
+
+ for entry in entries:
+ start = entry.svgDocOffset + subTableStart
+ end = start + entry.svgDocLength
+ doc = data[start:end]
+ compressed = False
+ if doc.startswith(b"\x1f\x8b"):
+ import gzip
+
+ bytesIO = BytesIO(doc)
+ with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
+ doc = gunzipper.read()
+ del bytesIO
+ compressed = True
+ doc = tostr(doc, "utf_8")
+ self.docList.append(
+ SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed)
+ )
+
+ def compile(self, ttFont):
+ version = 0
+ offsetToSVGDocIndex = (
+ SVG_format_0Size # I start the SVGDocIndex right after the header.
+ )
+ # get SVGDoc info.
+ docList = []
+ entryList = []
+ numEntries = len(self.docList)
+ datum = struct.pack(">H", numEntries)
+ entryList.append(datum)
+ curOffset = len(datum) + doc_index_entry_format_0Size * numEntries
+ seenDocs = {}
+ allCompressed = getattr(self, "compressed", False)
+ for i, doc in enumerate(self.docList):
+ if isinstance(doc, (list, tuple)):
+ doc = SVGDocument(*doc)
+ self.docList[i] = doc
+ docBytes = tobytes(doc.data, encoding="utf_8")
+ if (allCompressed or doc.compressed) and not docBytes.startswith(
+ b"\x1f\x8b"
+ ):
+ import gzip
+
+ bytesIO = BytesIO()
+ # mtime=0 strips the useless timestamp and makes gzip output reproducible;
+ # equivalent to `gzip -n`
+ with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper:
+ gzipper.write(docBytes)
+ gzipped = bytesIO.getvalue()
+ if len(gzipped) < len(docBytes):
+ docBytes = gzipped
+ del gzipped, bytesIO
+ docLength = len(docBytes)
+ if docBytes in seenDocs:
+ docOffset = seenDocs[docBytes]
+ else:
+ docOffset = curOffset
+ curOffset += docLength
+ seenDocs[docBytes] = docOffset
+ docList.append(docBytes)
+ entry = struct.pack(
+ ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength
+ )
+ entryList.append(entry)
+ entryList.extend(docList)
+ svgDocData = bytesjoin(entryList)
+
+ reserved = 0
+ header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
+ data = [header, svgDocData]
+ data = bytesjoin(data)
+ return data
+
+ def toXML(self, writer, ttFont):
+ for i, doc in enumerate(self.docList):
+ if isinstance(doc, (list, tuple)):
+ doc = SVGDocument(*doc)
+ self.docList[i] = doc
+ attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID}
+ if doc.compressed:
+ attrs["compressed"] = 1
+ writer.begintag("svgDoc", **attrs)
+ writer.newline()
+ writer.writecdata(doc.data)
+ writer.newline()
+ writer.endtag("svgDoc")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "svgDoc":
+ if not hasattr(self, "docList"):
+ self.docList = []
+ doc = strjoin(content)
+ doc = doc.strip()
+ startGID = int(attrs["startGlyphID"])
+ endGID = int(attrs["endGlyphID"])
+ compressed = bool(safeEval(attrs.get("compressed", "0")))
+ self.docList.append(SVGDocument(doc, startGID, endGID, compressed))
+ else:
+ log.warning("Unknown %s %s", name, content)
class DocumentIndexEntry(object):
- def __init__(self):
- self.startGlyphID = None # USHORT
- self.endGlyphID = None # USHORT
- self.svgDocOffset = None # ULONG
- self.svgDocLength = None # ULONG
+ def __init__(self):
+ self.startGlyphID = None # USHORT
+ self.endGlyphID = None # USHORT
+ self.svgDocOffset = None # ULONG
+ self.svgDocLength = None # ULONG
- def __repr__(self):
- return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength)
+ def __repr__(self):
+ return (
+ "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s"
+ % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength)
+ )
@dataclass
class SVGDocument(Sequence):
- data: str
- startGlyphID: int
- endGlyphID: int
- compressed: bool = False
-
- # Previously, the SVG table's docList attribute contained a lists of 3 items:
- # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute.
- # For backward compatibility with code that depends of them being sequences of
- # fixed length=3, we subclass the Sequence abstract base class and pretend only
- # the first three items are present. 'compressed' is only accessible via named
- # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]`
- def __getitem__(self, index):
- return astuple(self)[:3][index]
-
- def __len__(self):
- return 3
+ data: str
+ startGlyphID: int
+ endGlyphID: int
+ compressed: bool = False
+
+ # Previously, the SVG table's docList attribute contained lists of 3 items:
+ # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute.
+ # For backward compatibility with code that depends on them being sequences of
+ # fixed length=3, we subclass the Sequence abstract base class and pretend only
+ # the first three items are present. 'compressed' is only accessible via named
+ # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]`
+ def __getitem__(self, index):
+ return astuple(self)[:3][index]
+
+ def __len__(self):
+ return 3
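
The SVGDocument dataclass above keeps older code working that treated docList entries as plain 3-item sequences, while the newer compressed flag is reachable only by attribute; compile() and toXML() also upgrade bare lists or tuples found in docList to SVGDocument in place. A short sketch of that behaviour (the inline SVG string is only an illustration):

    from fontTools.ttLib.tables.S_V_G_ import SVGDocument

    doc = SVGDocument('<svg xmlns="http://www.w3.org/2000/svg"/>', 1, 1, compressed=True)
    data, start_gid, end_gid = doc  # still unpacks like the old [doc, startGlyphID, endGlyphID]
    assert len(doc) == 3            # __len__ hides the fourth field...
    assert doc.compressed           # ...which is only reachable as doc.compressed, not doc[3]

When compile() runs, documents flagged compressed (or a table-wide self.compressed) are gzipped with mtime=0 so the output stays reproducible, and the gzipped form is kept only if it is actually smaller than the raw bytes.
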
diff --git a/Lib/fontTools/ttLib/tables/S__i_l_f.py b/Lib/fontTools/ttLib/tables/S__i_l_f.py
index f326c386..324ffd01 100644
--- a/Lib/fontTools/ttLib/tables/S__i_l_f.py
+++ b/Lib/fontTools/ttLib/tables/S__i_l_f.py
@@ -1,6 +1,7 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
+
# from itertools import *
from . import DefaultTable
from . import grUtils
@@ -8,28 +9,28 @@ from array import array
from functools import reduce
import struct, re, sys
-Silf_hdr_format = '''
+Silf_hdr_format = """
>
version: 16.16F
-'''
+"""
-Silf_hdr_format_3 = '''
+Silf_hdr_format_3 = """
>
version: 16.16F
compilerVersion: L
numSilf: H
x
x
-'''
+"""
-Silf_part1_format_v3 = '''
+Silf_part1_format_v3 = """
>
ruleVersion: 16.16F
passOffset: H
pseudosOffset: H
-'''
+"""
-Silf_part1_format = '''
+Silf_part1_format = """
>
maxGlyphID: H
extraAscent: h
@@ -48,9 +49,9 @@ Silf_part1_format = '''
attrMirroring: B
attrSkipPasses: B
numJLevels: B
-'''
+"""
-Silf_justify_format = '''
+Silf_justify_format = """
>
attrStretch: B
attrShrink: B
@@ -60,9 +61,9 @@ Silf_justify_format = '''
x
x
x
-'''
+"""
-Silf_part2_format = '''
+Silf_part2_format = """
>
numLigComp: H
numUserDefn: B
@@ -73,41 +74,41 @@ Silf_part2_format = '''
x
x
numCritFeatures: B
-'''
+"""
-Silf_pseudomap_format = '''
+Silf_pseudomap_format = """
>
unicode: L
nPseudo: H
-'''
+"""
-Silf_pseudomap_format_h = '''
+Silf_pseudomap_format_h = """
>
unicode: H
nPseudo: H
-'''
+"""
-Silf_classmap_format = '''
+Silf_classmap_format = """
>
numClass: H
numLinear: H
-'''
+"""
-Silf_lookupclass_format = '''
+Silf_lookupclass_format = """
>
numIDs: H
searchRange: H
entrySelector: H
rangeShift: H
-'''
+"""
-Silf_lookuppair_format = '''
+Silf_lookuppair_format = """
>
glyphId: H
index: H
-'''
+"""
-Silf_pass_format = '''
+Silf_pass_format = """
>
flags: B
maxRuleLoop: B
@@ -123,7 +124,7 @@ Silf_pass_format = '''
numTransitional: H
numSuccess: H
numColumns: H
-'''
+"""
aCode_info = (
("NOP", 0),
@@ -142,7 +143,7 @@ aCode_info = (
("TRUNC8", 0),
("TRUNC16", 0),
("COND", 0),
- ("AND", 0), # x10
+ ("AND", 0), # x10
("OR", 0),
("NOT", 0),
("EQUAL", 0),
@@ -158,7 +159,7 @@ aCode_info = (
("PUT_SUBS_8BIT_OBS", "bBB"),
("PUT_COPY", "b"),
("INSERT", 0),
- ("DELETE", 0), # x20
+ ("DELETE", 0), # x20
("ASSOC", -1),
("CNTXT_ITEM", "bB"),
("ATTR_SET", "B"),
@@ -174,7 +175,7 @@ aCode_info = (
("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"),
("PUSH_ISLOT_ATTR", "Bbb"),
("PUSH_IGLYPH_ATTR", "Bbb"),
- ("POP_RET", 0), # x30
+ ("POP_RET", 0), # x30
("RET_ZERO", 0),
("RET_TRUE", 0),
("IATTR_SET", "BB"),
@@ -190,31 +191,33 @@ aCode_info = (
("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"),
("BITOR", 0),
("BITAND", 0),
- ("BITNOT", 0), # x40
+ ("BITNOT", 0), # x40
("BITSET", ">HH"),
- ("SET_FEAT", "Bb")
+ ("SET_FEAT", "Bb"),
)
-aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)])
+aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)])
+
def disassemble(aCode):
codelen = len(aCode)
pc = 0
res = []
while pc < codelen:
- opcode = byteord(aCode[pc:pc+1])
+ opcode = byteord(aCode[pc : pc + 1])
if opcode > len(aCode_info):
instr = aCode_info[0]
else:
instr = aCode_info[opcode]
pc += 1
- if instr[1] != 0 and pc >= codelen : return res
+ if instr[1] != 0 and pc >= codelen:
+ return res
if instr[1] == -1:
count = byteord(aCode[pc])
fmt = "%dB" % count
pc += 1
elif instr[1] == 0:
fmt = ""
- else :
+ else:
fmt = instr[1]
if fmt == "":
res.append(instr[0])
@@ -224,7 +227,10 @@ def disassemble(aCode):
pc += struct.calcsize(fmt)
return res
+
instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
+
+
def assemble(instrs):
res = b""
for inst in instrs:
@@ -239,11 +245,12 @@ def assemble(instrs):
parms = [int(x) for x in re.split(r",\s*", m.group(2))]
if parmfmt == -1:
l = len(parms)
- res += struct.pack(("%dB" % (l+1)), l, *parms)
+ res += struct.pack(("%dB" % (l + 1)), l, *parms)
else:
res += struct.pack(parmfmt, *parms)
return res
+
def writecode(tag, writer, instrs):
writer.begintag(tag)
writer.newline()
@@ -253,41 +260,71 @@ def writecode(tag, writer, instrs):
writer.endtag(tag)
writer.newline()
+
def readcode(content):
res = []
- for e in content_string(content).split('\n'):
+ for e in content_string(content).split("\n"):
e = e.strip()
- if not len(e): continue
+ if not len(e):
+ continue
res.append(e)
return assemble(res)
-
-attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID',
- 'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID')
-attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi')
-attrs_contexts = ('maxPreContext', 'maxPostContext')
-attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality',
- 'attrMirroring', 'attrSkipPasses', 'attCollisions')
-pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup',
- 'minRulePreContext', 'maxRulePreContext', 'collisionThreshold')
-pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns')
+
+
+attrs_info = (
+ "flags",
+ "extraAscent",
+ "extraDescent",
+ "maxGlyphID",
+ "numLigComp",
+ "numUserDefn",
+ "maxCompPerLig",
+ "direction",
+ "lbGID",
+)
+attrs_passindexes = ("iSubst", "iPos", "iJust", "iBidi")
+attrs_contexts = ("maxPreContext", "maxPostContext")
+attrs_attributes = (
+ "attrPseudo",
+ "attrBreakWeight",
+ "attrDirectionality",
+ "attrMirroring",
+ "attrSkipPasses",
+ "attCollisions",
+)
+pass_attrs_info = (
+ "flags",
+ "maxRuleLoop",
+ "maxRuleContext",
+ "maxBackup",
+ "minRulePreContext",
+ "maxRulePreContext",
+ "collisionThreshold",
+)
+pass_attrs_fsm = ("numRows", "numTransitional", "numSuccess", "numColumns")
+
def writesimple(tag, self, writer, *attrkeys):
attrs = dict([(k, getattr(self, k)) for k in attrkeys])
writer.simpletag(tag, **attrs)
writer.newline()
+
def getSimple(self, attrs, *attr_list):
for k in attr_list:
if k in attrs:
setattr(self, k, int(safeEval(attrs[k])))
+
def content_string(contents):
res = ""
for element in contents:
- if isinstance(element, tuple): continue
+ if isinstance(element, tuple):
+ continue
res += element
return res.strip()
+
def wrapline(writer, dat, length=80):
currline = ""
for d in dat:
@@ -300,11 +337,13 @@ def wrapline(writer, dat, length=80):
writer.write(currline[:-1])
writer.newline()
-class _Object() :
+
+class _Object:
pass
+
class table_S__i_l_f(DefaultTable.DefaultTable):
- '''Silf table support'''
+ """Silf table support"""
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
@@ -318,7 +357,7 @@ class table_S__i_l_f(DefaultTable.DefaultTable):
sstruct.unpack2(Silf_hdr_format_3, data, self)
base = sstruct.calcsize(Silf_hdr_format_3)
elif self.version < 3.0:
- self.numSilf = struct.unpack('>H', data[4:6])
+ self.numSilf = struct.unpack(">H", data[4:6])
self.scheme = 0
self.compilerVersion = 0
base = 8
@@ -327,7 +366,7 @@ class table_S__i_l_f(DefaultTable.DefaultTable):
sstruct.unpack2(Silf_hdr_format_3, data, self)
base = sstruct.calcsize(Silf_hdr_format_3)
- silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:])
+ silfoffsets = struct.unpack_from((">%dL" % self.numSilf), data[base:])
for offset in silfoffsets:
s = Silf()
self.silfs.append(s)
@@ -348,38 +387,44 @@ class table_S__i_l_f(DefaultTable.DefaultTable):
offset += len(subdata)
data += subdata
if self.version >= 5.0:
- return grUtils.compress(self.scheme, hdr+data)
- return hdr+data
+ return grUtils.compress(self.scheme, hdr + data)
+ return hdr + data
def toXML(self, writer, ttFont):
- writer.comment('Attributes starting with _ are informative only')
+ writer.comment("Attributes starting with _ are informative only")
writer.newline()
- writer.simpletag('version', version=self.version,
- compilerVersion=self.compilerVersion, compressionScheme=self.scheme)
+ writer.simpletag(
+ "version",
+ version=self.version,
+ compilerVersion=self.compilerVersion,
+ compressionScheme=self.scheme,
+ )
writer.newline()
for s in self.silfs:
- writer.begintag('silf')
+ writer.begintag("silf")
writer.newline()
s.toXML(writer, ttFont, self.version)
- writer.endtag('silf')
+ writer.endtag("silf")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.scheme=int(safeEval(attrs['compressionScheme']))
- self.version = float(safeEval(attrs['version']))
- self.compilerVersion = int(safeEval(attrs['compilerVersion']))
+ if name == "version":
+ self.scheme = int(safeEval(attrs["compressionScheme"]))
+ self.version = float(safeEval(attrs["version"]))
+ self.compilerVersion = int(safeEval(attrs["compilerVersion"]))
return
- if name == 'silf':
+ if name == "silf":
s = Silf()
self.silfs.append(s)
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
s.fromXML(tag, attrs, subcontent, ttFont, self.version)
+
class Silf(object):
- '''A particular Silf subtable'''
+ """A particular Silf subtable"""
def __init__(self):
self.passes = []
@@ -389,37 +434,59 @@ class Silf(object):
self.pMap = {}
def decompile(self, data, ttFont, version=2.0):
- if version >= 3.0 :
+ if version >= 3.0:
_, data = sstruct.unpack2(Silf_part1_format_v3, data, self)
- self.ruleVersion = float(floatToFixedToStr(self.ruleVersion, precisionBits=16))
+ self.ruleVersion = float(
+ floatToFixedToStr(self.ruleVersion, precisionBits=16)
+ )
_, data = sstruct.unpack2(Silf_part1_format, data, self)
for jlevel in range(self.numJLevels):
j, data = sstruct.unpack2(Silf_justify_format, data, _Object())
self.jLevels.append(j)
_, data = sstruct.unpack2(Silf_part2_format, data, self)
if self.numCritFeatures:
- self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data)
- data = data[self.numCritFeatures * 2 + 1:]
- (numScriptTag,) = struct.unpack_from('B', data)
+ self.critFeatures = struct.unpack_from(
+ (">%dH" % self.numCritFeatures), data
+ )
+ data = data[self.numCritFeatures * 2 + 1 :]
+ (numScriptTag,) = struct.unpack_from("B", data)
if numScriptTag:
- self.scriptTags = [struct.unpack("4s", data[x:x+4])[0].decode("ascii") for x in range(1, 1 + 4 * numScriptTag, 4)]
- data = data[1 + 4 * numScriptTag:]
- (self.lbGID,) = struct.unpack('>H', data[:2])
+ self.scriptTags = [
+ struct.unpack("4s", data[x : x + 4])[0].decode("ascii")
+ for x in range(1, 1 + 4 * numScriptTag, 4)
+ ]
+ data = data[1 + 4 * numScriptTag :]
+ (self.lbGID,) = struct.unpack(">H", data[:2])
if self.numPasses:
- self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)), data[2:6+4*self.numPasses])
- data = data[6 + 4 * self.numPasses:]
+ self.oPasses = struct.unpack(
+ (">%dL" % (self.numPasses + 1)), data[2 : 6 + 4 * self.numPasses]
+ )
+ data = data[6 + 4 * self.numPasses :]
(numPseudo,) = struct.unpack(">H", data[:2])
for i in range(numPseudo):
if version >= 3.0:
- pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object())
+ pseudo = sstruct.unpack(
+ Silf_pseudomap_format, data[8 + 6 * i : 14 + 6 * i], _Object()
+ )
else:
- pseudo = sstruct.unpack(Silf_pseudomap_format_h, data[8+4*i:12+4*i], _Object())
+ pseudo = sstruct.unpack(
+ Silf_pseudomap_format_h, data[8 + 4 * i : 12 + 4 * i], _Object()
+ )
self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
- data = data[8 + 6 * numPseudo:]
- currpos = (sstruct.calcsize(Silf_part1_format)
- + sstruct.calcsize(Silf_justify_format) * self.numJLevels
- + sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures
- + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo)
+ data = data[8 + 6 * numPseudo :]
+ currpos = (
+ sstruct.calcsize(Silf_part1_format)
+ + sstruct.calcsize(Silf_justify_format) * self.numJLevels
+ + sstruct.calcsize(Silf_part2_format)
+ + 2 * self.numCritFeatures
+ + 1
+ + 1
+ + 4 * numScriptTag
+ + 6
+ + 4 * self.numPasses
+ + 8
+ + 6 * numPseudo
+ )
if version >= 3.0:
currpos += sstruct.calcsize(Silf_part1_format_v3)
self.classes = Classes()
@@ -427,8 +494,11 @@ class Silf(object):
for i in range(self.numPasses):
p = Pass()
self.passes.append(p)
- p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos],
- ttFont, version)
+ p.decompile(
+ data[self.oPasses[i] - currpos : self.oPasses[i + 1] - currpos],
+ ttFont,
+ version,
+ )
def compile(self, ttFont, version=2.0):
self.numPasses = len(self.passes)
@@ -457,8 +527,9 @@ class Silf(object):
currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
self.pseudosOffset = currpos + len(data1)
for u, p in sorted(self.pMap.items()):
- data1 += struct.pack((">LH" if version >= 3.0 else ">HH"),
- u, ttFont.getGlyphID(p))
+ data1 += struct.pack(
+ (">LH" if version >= 3.0 else ">HH"), u, ttFont.getGlyphID(p)
+ )
data1 += self.classes.compile(ttFont, version)
currpos += len(data1)
data2 = b""
@@ -475,136 +546,147 @@ class Silf(object):
data3 = b""
return data3 + data + datao + data1 + data2
-
def toXML(self, writer, ttFont, version=2.0):
if version >= 3.0:
- writer.simpletag('version', ruleVersion=self.ruleVersion)
+ writer.simpletag("version", ruleVersion=self.ruleVersion)
writer.newline()
- writesimple('info', self, writer, *attrs_info)
- writesimple('passindexes', self, writer, *attrs_passindexes)
- writesimple('contexts', self, writer, *attrs_contexts)
- writesimple('attributes', self, writer, *attrs_attributes)
+ writesimple("info", self, writer, *attrs_info)
+ writesimple("passindexes", self, writer, *attrs_passindexes)
+ writesimple("contexts", self, writer, *attrs_contexts)
+ writesimple("attributes", self, writer, *attrs_attributes)
if len(self.jLevels):
- writer.begintag('justifications')
+ writer.begintag("justifications")
writer.newline()
jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
for i, j in enumerate(self.jLevels):
attrs = dict([(k, getattr(j, k)) for k in jnames])
- writer.simpletag('justify', **attrs)
+ writer.simpletag("justify", **attrs)
writer.newline()
- writer.endtag('justifications')
+ writer.endtag("justifications")
writer.newline()
if len(self.critFeatures):
- writer.begintag('critFeatures')
+ writer.begintag("critFeatures")
writer.newline()
writer.write(" ".join(map(str, self.critFeatures)))
writer.newline()
- writer.endtag('critFeatures')
+ writer.endtag("critFeatures")
writer.newline()
if len(self.scriptTags):
- writer.begintag('scriptTags')
+ writer.begintag("scriptTags")
writer.newline()
writer.write(" ".join(self.scriptTags))
writer.newline()
- writer.endtag('scriptTags')
+ writer.endtag("scriptTags")
writer.newline()
if self.pMap:
- writer.begintag('pseudoMap')
+ writer.begintag("pseudoMap")
writer.newline()
for k, v in sorted(self.pMap.items()):
- writer.simpletag('pseudo', unicode=hex(k), pseudo=v)
+ writer.simpletag("pseudo", unicode=hex(k), pseudo=v)
writer.newline()
- writer.endtag('pseudoMap')
+ writer.endtag("pseudoMap")
writer.newline()
self.classes.toXML(writer, ttFont, version)
if len(self.passes):
- writer.begintag('passes')
+ writer.begintag("passes")
writer.newline()
for i, p in enumerate(self.passes):
- writer.begintag('pass', _index=i)
+ writer.begintag("pass", _index=i)
writer.newline()
p.toXML(writer, ttFont, version)
- writer.endtag('pass')
+ writer.endtag("pass")
writer.newline()
- writer.endtag('passes')
+ writer.endtag("passes")
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
- if name == 'version':
- self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0")))
- if name == 'info':
+ if name == "version":
+ self.ruleVersion = float(safeEval(attrs.get("ruleVersion", "0")))
+ if name == "info":
getSimple(self, attrs, *attrs_info)
- elif name == 'passindexes':
+ elif name == "passindexes":
getSimple(self, attrs, *attrs_passindexes)
- elif name == 'contexts':
+ elif name == "contexts":
getSimple(self, attrs, *attrs_contexts)
- elif name == 'attributes':
+ elif name == "attributes":
getSimple(self, attrs, *attrs_attributes)
- elif name == 'justifications':
+ elif name == "justifications":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
(tag, attrs, subcontent) = element
- if tag == 'justify':
+ if tag == "justify":
j = _Object()
for k, v in attrs.items():
setattr(j, k, int(v))
self.jLevels.append(j)
- elif name == 'critFeatures':
+ elif name == "critFeatures":
self.critFeatures = []
element = content_string(content)
self.critFeatures.extend(map(int, element.split()))
- elif name == 'scriptTags':
+ elif name == "scriptTags":
self.scriptTags = []
element = content_string(content)
for n in element.split():
self.scriptTags.append(n)
- elif name == 'pseudoMap':
+ elif name == "pseudoMap":
self.pMap = {}
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
(tag, attrs, subcontent) = element
- if tag == 'pseudo':
- k = int(attrs['unicode'], 16)
- v = attrs['pseudo']
+ if tag == "pseudo":
+ k = int(attrs["unicode"], 16)
+ v = attrs["pseudo"]
self.pMap[k] = v
- elif name == 'classes':
+ elif name == "classes":
self.classes = Classes()
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
- elif name == 'passes':
+ elif name == "passes":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag == 'pass':
+ if tag == "pass":
p = Pass()
for e in subcontent:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
p.fromXML(e[0], e[1], e[2], ttFont, version)
self.passes.append(p)
class Classes(object):
-
def __init__(self):
self.linear = []
self.nonLinear = []
def decompile(self, data, ttFont, version=2.0):
sstruct.unpack2(Silf_classmap_format, data, self)
- if version >= 4.0 :
- oClasses = struct.unpack((">%dL" % (self.numClass+1)),
- data[4:8+4*self.numClass])
+ if version >= 4.0:
+ oClasses = struct.unpack(
+ (">%dL" % (self.numClass + 1)), data[4 : 8 + 4 * self.numClass]
+ )
else:
- oClasses = struct.unpack((">%dH" % (self.numClass+1)),
- data[4:6+2*self.numClass])
- for s,e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]):
- self.linear.append(ttFont.getGlyphName(x) for x in
- struct.unpack((">%dH" % ((e-s)/2)), data[s:e]))
- for s,e in zip(oClasses[self.numLinear:self.numClass],
- oClasses[self.numLinear+1:self.numClass+1]):
- nonLinids = [struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)]
+ oClasses = struct.unpack(
+ (">%dH" % (self.numClass + 1)), data[4 : 6 + 2 * self.numClass]
+ )
+ for s, e in zip(oClasses[: self.numLinear], oClasses[1 : self.numLinear + 1]):
+ self.linear.append(
+ ttFont.getGlyphName(x)
+ for x in struct.unpack((">%dH" % ((e - s) / 2)), data[s:e])
+ )
+ for s, e in zip(
+ oClasses[self.numLinear : self.numClass],
+ oClasses[self.numLinear + 1 : self.numClass + 1],
+ ):
+ nonLinids = [
+ struct.unpack(">HH", data[x : x + 4]) for x in range(s + 8, e, 4)
+ ]
nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids])
self.nonLinear.append(nonLin)
@@ -627,61 +709,68 @@ class Classes(object):
oClasses.append(len(data) + offset)
self.numClass = len(oClasses) - 1
self.numLinear = len(self.linear)
- return sstruct.pack(Silf_classmap_format, self) + \
- struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)),
- *oClasses) + data
+ return (
+ sstruct.pack(Silf_classmap_format, self)
+ + struct.pack(
+ ((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), *oClasses
+ )
+ + data
+ )
def toXML(self, writer, ttFont, version=2.0):
- writer.begintag('classes')
+ writer.begintag("classes")
writer.newline()
- writer.begintag('linearClasses')
+ writer.begintag("linearClasses")
writer.newline()
- for i,l in enumerate(self.linear):
- writer.begintag('linear', _index=i)
+ for i, l in enumerate(self.linear):
+ writer.begintag("linear", _index=i)
writer.newline()
wrapline(writer, l)
- writer.endtag('linear')
+ writer.endtag("linear")
writer.newline()
- writer.endtag('linearClasses')
+ writer.endtag("linearClasses")
writer.newline()
- writer.begintag('nonLinearClasses')
+ writer.begintag("nonLinearClasses")
writer.newline()
for i, l in enumerate(self.nonLinear):
- writer.begintag('nonLinear', _index=i + self.numLinear)
+ writer.begintag("nonLinear", _index=i + self.numLinear)
writer.newline()
for inp, ind in l.items():
- writer.simpletag('map', glyph=inp, index=ind)
+ writer.simpletag("map", glyph=inp, index=ind)
writer.newline()
- writer.endtag('nonLinear')
+ writer.endtag("nonLinear")
writer.newline()
- writer.endtag('nonLinearClasses')
+ writer.endtag("nonLinearClasses")
writer.newline()
- writer.endtag('classes')
+ writer.endtag("classes")
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
- if name == 'linearClasses':
+ if name == "linearClasses":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag == 'linear':
+ if tag == "linear":
l = content_string(subcontent).split()
self.linear.append(l)
- elif name == 'nonLinearClasses':
+ elif name == "nonLinearClasses":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag =='nonLinear':
+ if tag == "nonLinear":
l = {}
for e in subcontent:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
tag, attrs, subsubcontent = e
- if tag == 'map':
- l[attrs['glyph']] = int(safeEval(attrs['index']))
+ if tag == "map":
+ l[attrs["glyph"]] = int(safeEval(attrs["index"]))
self.nonLinear.append(l)
-class Pass(object):
+class Pass(object):
def __init__(self):
self.colMap = {}
self.rules = []
@@ -698,71 +787,109 @@ class Pass(object):
(numRange, _, _, _) = struct.unpack(">4H", data[:8])
data = data[8:]
for i in range(numRange):
- (first, last, col) = struct.unpack(">3H", data[6*i:6*i+6])
- for g in range(first, last+1):
+ (first, last, col) = struct.unpack(">3H", data[6 * i : 6 * i + 6])
+ for g in range(first, last + 1):
self.colMap[ttFont.getGlyphName(g)] = col
- data = data[6*numRange:]
+ data = data[6 * numRange :]
oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data)
- data = data[2+2*self.numSuccess:]
+ data = data[2 + 2 * self.numSuccess :]
rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data)
- self.rules = [rules[s:e] for (s,e) in zip(oRuleMap, oRuleMap[1:])]
- data = data[2*oRuleMap[-1]:]
- (self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2])
+ self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])]
+ data = data[2 * oRuleMap[-1] :]
+ (self.minRulePreContext, self.maxRulePreContext) = struct.unpack("BB", data[:2])
numStartStates = self.maxRulePreContext - self.minRulePreContext + 1
- self.startStates = struct.unpack((">%dH" % numStartStates),
- data[2:2 + numStartStates * 2])
- data = data[2+numStartStates*2:]
- self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules])
- data = data[2*self.numRules:]
- self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules])
- data = data[self.numRules:]
+ self.startStates = struct.unpack(
+ (">%dH" % numStartStates), data[2 : 2 + numStartStates * 2]
+ )
+ data = data[2 + numStartStates * 2 :]
+ self.ruleSortKeys = struct.unpack(
+ (">%dH" % self.numRules), data[: 2 * self.numRules]
+ )
+ data = data[2 * self.numRules :]
+ self.rulePreContexts = struct.unpack(
+ ("%dB" % self.numRules), data[: self.numRules]
+ )
+ data = data[self.numRules :]
(self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3])
- oConstraints = list(struct.unpack((">%dH" % (self.numRules + 1)),
- data[3:5 + self.numRules * 2]))
- data = data[5 + self.numRules * 2:]
- oActions = list(struct.unpack((">%dH" % (self.numRules + 1)),
- data[:2 + self.numRules * 2]))
- data = data[2 * self.numRules + 2:]
+ oConstraints = list(
+ struct.unpack(
+ (">%dH" % (self.numRules + 1)), data[3 : 5 + self.numRules * 2]
+ )
+ )
+ data = data[5 + self.numRules * 2 :]
+ oActions = list(
+ struct.unpack((">%dH" % (self.numRules + 1)), data[: 2 + self.numRules * 2])
+ )
+ data = data[2 * self.numRules + 2 :]
for i in range(self.numTransitional):
- a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2])
- if sys.byteorder != "big": a.byteswap()
+ a = array(
+ "H", data[i * self.numColumns * 2 : (i + 1) * self.numColumns * 2]
+ )
+ if sys.byteorder != "big":
+ a.byteswap()
self.stateTrans.append(a)
- data = data[self.numTransitional * self.numColumns * 2 + 1:]
+ data = data[self.numTransitional * self.numColumns * 2 + 1 :]
self.passConstraints = data[:pConstraint]
data = data[pConstraint:]
- for i in range(len(oConstraints)-2,-1,-1):
- if oConstraints[i] == 0 :
- oConstraints[i] = oConstraints[i+1]
- self.ruleConstraints = [(data[s:e] if (e-s > 1) else b"") for (s,e) in zip(oConstraints, oConstraints[1:])]
- data = data[oConstraints[-1]:]
- self.actions = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oActions, oActions[1:])]
- data = data[oActions[-1]:]
+ for i in range(len(oConstraints) - 2, -1, -1):
+ if oConstraints[i] == 0:
+ oConstraints[i] = oConstraints[i + 1]
+ self.ruleConstraints = [
+ (data[s:e] if (e - s > 1) else b"")
+ for (s, e) in zip(oConstraints, oConstraints[1:])
+ ]
+ data = data[oConstraints[-1] :]
+ self.actions = [
+ (data[s:e] if (e - s > 1) else "") for (s, e) in zip(oActions, oActions[1:])
+ ]
+ data = data[oActions[-1] :]
# not using debug
def compile(self, ttFont, base, version=2.0):
# build it all up backwards
- oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [b""], (0, []))[1]
- oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [b""], (1, []))[1]
+ oActions = reduce(
+ lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.actions + [b""], (0, [])
+ )[1]
+ oConstraints = reduce(
+ lambda a, x: (a[0] + len(x), a[1] + [a[0]]),
+ self.ruleConstraints + [b""],
+ (1, []),
+ )[1]
constraintCode = b"\000" + b"".join(self.ruleConstraints)
transes = []
for t in self.stateTrans:
- if sys.byteorder != "big": t.byteswap()
+ if sys.byteorder != "big":
+ t.byteswap()
transes.append(t.tobytes())
- if sys.byteorder != "big": t.byteswap()
+ if sys.byteorder != "big":
+ t.byteswap()
if not len(transes):
self.startStates = [0]
- oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1]
+ oRuleMap = reduce(
+ lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.rules + [[]], (0, [])
+ )[1]
passRanges = []
gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()])
- for e in grUtils.entries(gidcolmap, sameval = True):
+ for e in grUtils.entries(gidcolmap, sameval=True):
if e[1]:
- passRanges.append((e[0], e[0]+e[1]-1, e[2][0]))
+ passRanges.append((e[0], e[0] + e[1] - 1, e[2][0]))
self.numRules = len(self.actions)
- self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6
- + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2
- + 2 * len(self.startStates) + 3 * self.numRules + 3
- + 4 * self.numRules + 4)
- self.pcCode = self.fsmOffset + 2*self.numTransitional*self.numColumns + 1 + base
+ self.fsmOffset = (
+ sstruct.calcsize(Silf_pass_format)
+ + 8
+ + len(passRanges) * 6
+ + len(oRuleMap) * 2
+ + 2 * oRuleMap[-1]
+ + 2
+ + 2 * len(self.startStates)
+ + 3 * self.numRules
+ + 3
+ + 4 * self.numRules
+ + 4
+ )
+ self.pcCode = (
+ self.fsmOffset + 2 * self.numTransitional * self.numColumns + 1 + base
+ )
self.rcCode = self.pcCode + len(self.passConstraints)
self.aCode = self.rcCode + len(constraintCode)
self.oDebug = 0
@@ -771,115 +898,140 @@ class Pass(object):
data += grUtils.bininfo(len(passRanges), 6)
data += b"".join(struct.pack(">3H", *p) for p in passRanges)
data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap)
- flatrules = reduce(lambda a,x: a+x, self.rules, [])
+ flatrules = reduce(lambda a, x: a + x, self.rules, [])
data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules)
data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext)
data += struct.pack((">%dH" % len(self.startStates)), *self.startStates)
data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys)
data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts)
data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints))
- data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints)
- data += struct.pack((">%dH" % (self.numRules+1)), *oActions)
- return data + b"".join(transes) + struct.pack("B", 0) + \
- self.passConstraints + constraintCode + b"".join(self.actions)
+ data += struct.pack((">%dH" % (self.numRules + 1)), *oConstraints)
+ data += struct.pack((">%dH" % (self.numRules + 1)), *oActions)
+ return (
+ data
+ + b"".join(transes)
+ + struct.pack("B", 0)
+ + self.passConstraints
+ + constraintCode
+ + b"".join(self.actions)
+ )
def toXML(self, writer, ttFont, version=2.0):
- writesimple('info', self, writer, *pass_attrs_info)
- writesimple('fsminfo', self, writer, *pass_attrs_fsm)
- writer.begintag('colmap')
+ writesimple("info", self, writer, *pass_attrs_info)
+ writesimple("fsminfo", self, writer, *pass_attrs_fsm)
+ writer.begintag("colmap")
writer.newline()
- wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(),
- key=lambda x:ttFont.getGlyphID(x[0]))])
- writer.endtag('colmap')
+ wrapline(
+ writer,
+ [
+ "{}={}".format(*x)
+ for x in sorted(
+ self.colMap.items(), key=lambda x: ttFont.getGlyphID(x[0])
+ )
+ ],
+ )
+ writer.endtag("colmap")
writer.newline()
- writer.begintag('staterulemap')
+ writer.begintag("staterulemap")
writer.newline()
for i, r in enumerate(self.rules):
- writer.simpletag('state', number = self.numRows - self.numSuccess + i,
- rules = " ".join(map(str, r)))
+ writer.simpletag(
+ "state",
+ number=self.numRows - self.numSuccess + i,
+ rules=" ".join(map(str, r)),
+ )
writer.newline()
- writer.endtag('staterulemap')
+ writer.endtag("staterulemap")
writer.newline()
- writer.begintag('rules')
+ writer.begintag("rules")
writer.newline()
for i in range(len(self.actions)):
- writer.begintag('rule', index=i, precontext=self.rulePreContexts[i],
- sortkey=self.ruleSortKeys[i])
+ writer.begintag(
+ "rule",
+ index=i,
+ precontext=self.rulePreContexts[i],
+ sortkey=self.ruleSortKeys[i],
+ )
writer.newline()
if len(self.ruleConstraints[i]):
- writecode('constraint', writer, self.ruleConstraints[i])
- writecode('action', writer, self.actions[i])
- writer.endtag('rule')
+ writecode("constraint", writer, self.ruleConstraints[i])
+ writecode("action", writer, self.actions[i])
+ writer.endtag("rule")
writer.newline()
- writer.endtag('rules')
+ writer.endtag("rules")
writer.newline()
if len(self.passConstraints):
- writecode('passConstraint', writer, self.passConstraints)
+ writecode("passConstraint", writer, self.passConstraints)
if len(self.stateTrans):
- writer.begintag('fsm')
+ writer.begintag("fsm")
writer.newline()
- writer.begintag('starts')
+ writer.begintag("starts")
writer.write(" ".join(map(str, self.startStates)))
- writer.endtag('starts')
+ writer.endtag("starts")
writer.newline()
for i, s in enumerate(self.stateTrans):
- writer.begintag('row', _i=i)
+ writer.begintag("row", _i=i)
# no newlines here
writer.write(" ".join(map(str, s)))
- writer.endtag('row')
+ writer.endtag("row")
writer.newline()
- writer.endtag('fsm')
+ writer.endtag("fsm")
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
- if name == 'info':
+ if name == "info":
getSimple(self, attrs, *pass_attrs_info)
- elif name == 'fsminfo':
+ elif name == "fsminfo":
getSimple(self, attrs, *pass_attrs_fsm)
- elif name == 'colmap':
+ elif name == "colmap":
e = content_string(content)
for w in e.split():
- x = w.split('=')
- if len(x) != 2 or x[0] == '' or x[1] == '': continue
+ x = w.split("=")
+ if len(x) != 2 or x[0] == "" or x[1] == "":
+ continue
self.colMap[x[0]] = int(x[1])
- elif name == 'staterulemap':
+ elif name == "staterulemap":
for e in content:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
tag, a, c = e
- if tag == 'state':
- self.rules.append([int(x) for x in a['rules'].split(" ")])
- elif name == 'rules':
+ if tag == "state":
+ self.rules.append([int(x) for x in a["rules"].split(" ")])
+ elif name == "rules":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, c = element
- if tag != 'rule': continue
- self.rulePreContexts.append(int(a['precontext']))
- self.ruleSortKeys.append(int(a['sortkey']))
+ if tag != "rule":
+ continue
+ self.rulePreContexts.append(int(a["precontext"]))
+ self.ruleSortKeys.append(int(a["sortkey"]))
con = b""
act = b""
for e in c:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
tag, a, subc = e
- if tag == 'constraint':
+ if tag == "constraint":
con = readcode(subc)
- elif tag == 'action':
+ elif tag == "action":
act = readcode(subc)
self.actions.append(act)
self.ruleConstraints.append(con)
- elif name == 'passConstraint':
+ elif name == "passConstraint":
self.passConstraints = readcode(content)
- elif name == 'fsm':
+ elif name == "fsm":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, c = element
- if tag == 'row':
- s = array('H')
+ if tag == "row":
+ s = array("H")
e = content_string(c)
s.extend(map(int, e.split()))
self.stateTrans.append(s)
- elif tag == 'starts':
+ elif tag == "starts":
s = []
e = content_string(c)
s.extend(map(int, e.split()))
self.startStates = s
-
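Annotation: the Silf fromXML hunk above parses the "colmap" element text as whitespace-separated "glyphName=column" pairs, skipping malformed tokens. A minimal standalone sketch of that text format follows; the function name parse_colmap is hypothetical, for illustration only, and is not part of fontTools.

def parse_colmap(text):
    # whitespace-separated "glyphName=column" tokens; skip anything malformed,
    # exactly as the table's fromXML loop does
    colMap = {}
    for token in text.split():
        parts = token.split("=")
        if len(parts) != 2 or parts[0] == "" or parts[1] == "":
            continue
        colMap[parts[0]] = int(parts[1])
    return colMap

assert parse_colmap("A=1 B=2 bogus") == {"A": 1, "B": 2}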
diff --git a/Lib/fontTools/ttLib/tables/S__i_l_l.py b/Lib/fontTools/ttLib/tables/S__i_l_l.py
index 5ab9ee34..12b0b8f6 100644
--- a/Lib/fontTools/ttLib/tables/S__i_l_l.py
+++ b/Lib/fontTools/ttLib/tables/S__i_l_l.py
@@ -5,13 +5,13 @@ from . import DefaultTable
from . import grUtils
import struct
-Sill_hdr = '''
+Sill_hdr = """
>
version: 16.16F
-'''
+"""
-class table_S__i_l_l(DefaultTable.DefaultTable):
+class table_S__i_l_l(DefaultTable.DefaultTable):
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.langs = {}
@@ -19,26 +19,27 @@ class table_S__i_l_l(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
(_, data) = sstruct.unpack2(Sill_hdr, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
- numLangs, = struct.unpack('>H', data[:2])
+ (numLangs,) = struct.unpack(">H", data[:2])
data = data[8:]
maxsetting = 0
langinfo = []
for i in range(numLangs):
- (langcode, numsettings, offset) = struct.unpack(">4sHH",
- data[i * 8:(i+1) * 8])
+ (langcode, numsettings, offset) = struct.unpack(
+ ">4sHH", data[i * 8 : (i + 1) * 8]
+ )
offset = int(offset / 8) - (numLangs + 1)
- langcode = langcode.replace(b'\000', b'')
+ langcode = langcode.replace(b"\000", b"")
langinfo.append((langcode.decode("utf-8"), numsettings, offset))
maxsetting = max(maxsetting, offset + numsettings)
- data = data[numLangs * 8:]
+ data = data[numLangs * 8 :]
finfo = []
for i in range(maxsetting):
- (fid, val, _) = struct.unpack(">LHH", data[i * 8:(i+1) * 8])
+ (fid, val, _) = struct.unpack(">LHH", data[i * 8 : (i + 1) * 8])
finfo.append((fid, val))
self.langs = {}
for c, n, o in langinfo:
self.langs[c] = []
- for i in range(o, o+n):
+ for i in range(o, o + n):
self.langs[c].append(finfo[i])
def compile(self, ttFont):
@@ -46,35 +47,41 @@ class table_S__i_l_l(DefaultTable.DefaultTable):
fdat = b""
offset = len(self.langs)
for c, inf in sorted(self.langs.items()):
- ldat += struct.pack(">4sHH", c.encode('utf8'), len(inf), 8 * offset + 20)
+ ldat += struct.pack(">4sHH", c.encode("utf8"), len(inf), 8 * offset + 20)
for fid, val in inf:
fdat += struct.pack(">LHH", fid, val, 0)
offset += len(inf)
ldat += struct.pack(">LHH", 0x80808080, 0, 8 * offset + 20)
- return sstruct.pack(Sill_hdr, self) + grUtils.bininfo(len(self.langs)) + \
- ldat + fdat
+ return (
+ sstruct.pack(Sill_hdr, self)
+ + grUtils.bininfo(len(self.langs))
+ + ldat
+ + fdat
+ )
def toXML(self, writer, ttFont):
- writer.simpletag('version', version=self.version)
+ writer.simpletag("version", version=self.version)
writer.newline()
for c, inf in sorted(self.langs.items()):
- writer.begintag('lang', name=c)
+ writer.begintag("lang", name=c)
writer.newline()
for fid, val in inf:
- writer.simpletag('feature', fid=grUtils.num2tag(fid), val=val)
+ writer.simpletag("feature", fid=grUtils.num2tag(fid), val=val)
writer.newline()
- writer.endtag('lang')
+ writer.endtag("lang")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.version = float(safeEval(attrs['version']))
- elif name == 'lang':
- c = attrs['name']
+ if name == "version":
+ self.version = float(safeEval(attrs["version"]))
+ elif name == "lang":
+ c = attrs["name"]
self.langs[c] = []
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, subcontent = element
- if tag == 'feature':
- self.langs[c].append((grUtils.tag2num(a['fid']),
- int(safeEval(a['val']))))
+ if tag == "feature":
+ self.langs[c].append(
+ (grUtils.tag2num(a["fid"]), int(safeEval(a["val"])))
+ )
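Annotation: the Sill decompile method above reads each language record with the big-endian format ">4sHH" (a NUL-padded 4-byte language tag, a settings count, and a byte offset that the table code later converts to a record index). A minimal sketch of one such record, using made-up values and not the fontTools API:

import struct

record = struct.pack(">4sHH", b"en\0\0", 2, 40)
langcode, numsettings, offset = struct.unpack(">4sHH", record)
langcode = langcode.replace(b"\000", b"").decode("utf-8")  # strip NUL padding
print(langcode, numsettings, offset)  # en 2 40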
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_B_.py b/Lib/fontTools/ttLib/tables/T_S_I_B_.py
index 25d43104..8a6c14c4 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_B_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_B_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_B_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_D_.py b/Lib/fontTools/ttLib/tables/T_S_I_D_.py
index 310eb174..536ff2f9 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_D_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_D_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_D_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_J_.py b/Lib/fontTools/ttLib/tables/T_S_I_J_.py
index c1a46ba6..bc8fe92a 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_J_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_J_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_J_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_P_.py b/Lib/fontTools/ttLib/tables/T_S_I_P_.py
index 778974c8..1abc0259 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_P_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_P_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_P_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_S_.py b/Lib/fontTools/ttLib/tables/T_S_I_S_.py
index 61c9f76f..667eb0e5 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_S_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_S_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_S_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_V_.py b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
index c1e244c6..d7aec458 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_V_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
@@ -1,20 +1,20 @@
from fontTools.misc.textTools import strjoin, tobytes, tostr
from . import asciiTable
-class table_T_S_I_V_(asciiTable.asciiTable):
- def toXML(self, writer, ttFont):
- data = tostr(self.data)
- # removing null bytes. XXX needed??
- data = data.split('\0')
- data = strjoin(data)
- writer.begintag("source")
- writer.newline()
- writer.write_noindent(data.replace("\r", "\n"))
- writer.newline()
- writer.endtag("source")
- writer.newline()
+class table_T_S_I_V_(asciiTable.asciiTable):
+ def toXML(self, writer, ttFont):
+ data = tostr(self.data)
+ # removing null bytes. XXX needed??
+ data = data.split("\0")
+ data = strjoin(data)
+ writer.begintag("source")
+ writer.newline()
+ writer.write_noindent(data.replace("\r", "\n"))
+ writer.newline()
+ writer.endtag("source")
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- lines = strjoin(content).split("\n")
- self.data = tobytes("\r".join(lines[1:-1]))
+ def fromXML(self, name, attrs, content, ttFont):
+ lines = strjoin(content).split("\n")
+ self.data = tobytes("\r".join(lines[1:-1]))
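Annotation: the TSIV table above stores source text with "\r" separators in the binary data, writes it out with "\n" in the XML dump, and converts back on fromXML (dropping the first and last lines added by the surrounding tags). A minimal standalone sketch of that round trip, with illustrative values only:

stored = b"line one\rline two"
xml_text = stored.decode("ascii").replace("\r", "\n")   # as in toXML
restored = "\r".join(xml_text.split("\n")).encode("ascii")  # as in fromXML
assert restored == stored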
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__0.py b/Lib/fontTools/ttLib/tables/T_S_I__0.py
index b187f425..f15fc67b 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__0.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__0.py
@@ -8,47 +8,49 @@ in the TSI1 table.
from . import DefaultTable
import struct
-tsi0Format = '>HHL'
+tsi0Format = ">HHL"
+
def fixlongs(glyphID, textLength, textOffset):
- return int(glyphID), int(textLength), textOffset
+ return int(glyphID), int(textLength), textOffset
class table_T_S_I__0(DefaultTable.DefaultTable):
-
- dependencies = ["TSI1"]
-
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- indices = []
- size = struct.calcsize(tsi0Format)
- for i in range(numGlyphs + 5):
- glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size]))
- indices.append((glyphID, textLength, textOffset))
- data = data[size:]
- assert len(data) == 0
- assert indices[-5] == (0XFFFE, 0, 0xABFC1F34), "bad magic number"
- self.indices = indices[:-5]
- self.extra_indices = indices[-4:]
-
- def compile(self, ttFont):
- if not hasattr(self, "indices"):
- # We have no corresponding table (TSI1 or TSI3); let's return
- # no data, which effectively means "ignore us".
- return b""
- data = b""
- for index, textLength, textOffset in self.indices:
- data = data + struct.pack(tsi0Format, index, textLength, textOffset)
- data = data + struct.pack(tsi0Format, 0XFFFE, 0, 0xABFC1F34)
- for index, textLength, textOffset in self.extra_indices:
- data = data + struct.pack(tsi0Format, index, textLength, textOffset)
- return data
-
- def set(self, indices, extra_indices):
- # gets called by 'TSI1' or 'TSI3'
- self.indices = indices
- self.extra_indices = extra_indices
-
- def toXML(self, writer, ttFont):
- writer.comment("This table will be calculated by the compiler")
- writer.newline()
+ dependencies = ["TSI1"]
+
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ indices = []
+ size = struct.calcsize(tsi0Format)
+ for i in range(numGlyphs + 5):
+ glyphID, textLength, textOffset = fixlongs(
+ *struct.unpack(tsi0Format, data[:size])
+ )
+ indices.append((glyphID, textLength, textOffset))
+ data = data[size:]
+ assert len(data) == 0
+ assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number"
+ self.indices = indices[:-5]
+ self.extra_indices = indices[-4:]
+
+ def compile(self, ttFont):
+ if not hasattr(self, "indices"):
+ # We have no corresponding table (TSI1 or TSI3); let's return
+ # no data, which effectively means "ignore us".
+ return b""
+ data = b""
+ for index, textLength, textOffset in self.indices:
+ data = data + struct.pack(tsi0Format, index, textLength, textOffset)
+ data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34)
+ for index, textLength, textOffset in self.extra_indices:
+ data = data + struct.pack(tsi0Format, index, textLength, textOffset)
+ return data
+
+ def set(self, indices, extra_indices):
+ # gets called by 'TSI1' or 'TSI3'
+ self.indices = indices
+ self.extra_indices = extra_indices
+
+ def toXML(self, writer, ttFont):
+ writer.comment("This table will be calculated by the compiler")
+ writer.newline()
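Annotation: the TSI0 records handled above use the big-endian layout ">HHL" (uint16 glyphID, uint16 textLength, uint32 textOffset), with a fixed "magic" record (0xFFFE, 0, 0xABFC1F34) separating the per-glyph records from the four extra records. A minimal sketch of packing and unpacking that record:

import struct

tsi0Format = ">HHL"
magic = struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34)
glyphID, textLength, textOffset = struct.unpack(tsi0Format, magic)
assert (glyphID, textLength, textOffset) == (0xFFFE, 0, 0xABFC1F34)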
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__1.py b/Lib/fontTools/ttLib/tables/T_S_I__1.py
index 7f7608b2..55aca339 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__1.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__1.py
@@ -10,147 +10,154 @@ from fontTools.misc.textTools import strjoin, tobytes, tostr
class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
+ extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"}
- extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"}
+ indextable = "TSI0"
- indextable = "TSI0"
+ def decompile(self, data, ttFont):
+ totalLength = len(data)
+ indextable = ttFont[self.indextable]
+ for indices, isExtra in zip(
+ (indextable.indices, indextable.extra_indices), (False, True)
+ ):
+ programs = {}
+ for i, (glyphID, textLength, textOffset) in enumerate(indices):
+ if isExtra:
+ name = self.extras[glyphID]
+ else:
+ name = ttFont.getGlyphName(glyphID)
+ if textOffset > totalLength:
+ self.log.warning("textOffset > totalLength; %r skipped" % name)
+ continue
+ if textLength < 0x8000:
+ # If the length stored in the record is less than 32768, then use
+ # that as the length of the record.
+ pass
+ elif textLength == 0x8000:
+ # If the length is 32768, compute the actual length as follows:
+ isLast = i == (len(indices) - 1)
+ if isLast:
+ if isExtra:
+ # For the last "extra" record (the very last record of the
+ # table), the length is the difference between the total
+ # length of the TSI1 table and the textOffset of the final
+ # record.
+ nextTextOffset = totalLength
+ else:
+ # For the last "normal" record (the last record just prior
+ # to the record containing the "magic number"), the length
+ # is the difference between the textOffset of the record
+ # following the "magic number" (0xFFFE) record (i.e. the
+ # first "extra" record), and the textOffset of the last
+ # "normal" record.
+ nextTextOffset = indextable.extra_indices[0][2]
+ else:
+ # For all other records with a length of 0x8000, the length is
+ # the difference between the textOffset of the record in
+ # question and the textOffset of the next record.
+ nextTextOffset = indices[i + 1][2]
+ assert nextTextOffset >= textOffset, "entries not sorted by offset"
+ if nextTextOffset > totalLength:
+ self.log.warning(
+ "nextTextOffset > totalLength; %r truncated" % name
+ )
+ nextTextOffset = totalLength
+ textLength = nextTextOffset - textOffset
+ else:
+ from fontTools import ttLib
- def decompile(self, data, ttFont):
- totalLength = len(data)
- indextable = ttFont[self.indextable]
- for indices, isExtra in zip(
- (indextable.indices, indextable.extra_indices), (False, True)):
- programs = {}
- for i, (glyphID, textLength, textOffset) in enumerate(indices):
- if isExtra:
- name = self.extras[glyphID]
- else:
- name = ttFont.getGlyphName(glyphID)
- if textOffset > totalLength:
- self.log.warning("textOffset > totalLength; %r skipped" % name)
- continue
- if textLength < 0x8000:
- # If the length stored in the record is less than 32768, then use
- # that as the length of the record.
- pass
- elif textLength == 0x8000:
- # If the length is 32768, compute the actual length as follows:
- isLast = i == (len(indices)-1)
- if isLast:
- if isExtra:
- # For the last "extra" record (the very last record of the
- # table), the length is the difference between the total
- # length of the TSI1 table and the textOffset of the final
- # record.
- nextTextOffset = totalLength
- else:
- # For the last "normal" record (the last record just prior
- # to the record containing the "magic number"), the length
- # is the difference between the textOffset of the record
- # following the "magic number" (0xFFFE) record (i.e. the
- # first "extra" record), and the textOffset of the last
- # "normal" record.
- nextTextOffset = indextable.extra_indices[0][2]
- else:
- # For all other records with a length of 0x8000, the length is
- # the difference between the textOffset of the record in
- # question and the textOffset of the next record.
- nextTextOffset = indices[i+1][2]
- assert nextTextOffset >= textOffset, "entries not sorted by offset"
- if nextTextOffset > totalLength:
- self.log.warning(
- "nextTextOffset > totalLength; %r truncated" % name)
- nextTextOffset = totalLength
- textLength = nextTextOffset - textOffset
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError(
- "%r textLength (%d) must not be > 32768" % (name, textLength))
- text = data[textOffset:textOffset+textLength]
- assert len(text) == textLength
- text = tostr(text, encoding='utf-8')
- if text:
- programs[name] = text
- if isExtra:
- self.extraPrograms = programs
- else:
- self.glyphPrograms = programs
+ raise ttLib.TTLibError(
+ "%r textLength (%d) must not be > 32768" % (name, textLength)
+ )
+ text = data[textOffset : textOffset + textLength]
+ assert len(text) == textLength
+ text = tostr(text, encoding="utf-8")
+ if text:
+ programs[name] = text
+ if isExtra:
+ self.extraPrograms = programs
+ else:
+ self.glyphPrograms = programs
- def compile(self, ttFont):
- if not hasattr(self, "glyphPrograms"):
- self.glyphPrograms = {}
- self.extraPrograms = {}
- data = b''
- indextable = ttFont[self.indextable]
- glyphNames = ttFont.getGlyphOrder()
+ def compile(self, ttFont):
+ if not hasattr(self, "glyphPrograms"):
+ self.glyphPrograms = {}
+ self.extraPrograms = {}
+ data = b""
+ indextable = ttFont[self.indextable]
+ glyphNames = ttFont.getGlyphOrder()
- indices = []
- for i in range(len(glyphNames)):
- if len(data) % 2:
- data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum.
- name = glyphNames[i]
- if name in self.glyphPrograms:
- text = tobytes(self.glyphPrograms[name], encoding="utf-8")
- else:
- text = b""
- textLength = len(text)
- if textLength >= 0x8000:
- textLength = 0x8000
- indices.append((i, textLength, len(data)))
- data = data + text
+ indices = []
+ for i in range(len(glyphNames)):
+ if len(data) % 2:
+ data = (
+ data + b"\015"
+ ) # align on 2-byte boundaries, fill with return chars. Yum.
+ name = glyphNames[i]
+ if name in self.glyphPrograms:
+ text = tobytes(self.glyphPrograms[name], encoding="utf-8")
+ else:
+ text = b""
+ textLength = len(text)
+ if textLength >= 0x8000:
+ textLength = 0x8000
+ indices.append((i, textLength, len(data)))
+ data = data + text
- extra_indices = []
- codes = sorted(self.extras.items())
- for i in range(len(codes)):
- if len(data) % 2:
- data = data + b"\015" # align on 2-byte boundaries, fill with return chars.
- code, name = codes[i]
- if name in self.extraPrograms:
- text = tobytes(self.extraPrograms[name], encoding="utf-8")
- else:
- text = b""
- textLength = len(text)
- if textLength >= 0x8000:
- textLength = 0x8000
- extra_indices.append((code, textLength, len(data)))
- data = data + text
- indextable.set(indices, extra_indices)
- return data
+ extra_indices = []
+ codes = sorted(self.extras.items())
+ for i in range(len(codes)):
+ if len(data) % 2:
+ data = (
+ data + b"\015"
+ ) # align on 2-byte boundaries, fill with return chars.
+ code, name = codes[i]
+ if name in self.extraPrograms:
+ text = tobytes(self.extraPrograms[name], encoding="utf-8")
+ else:
+ text = b""
+ textLength = len(text)
+ if textLength >= 0x8000:
+ textLength = 0x8000
+ extra_indices.append((code, textLength, len(data)))
+ data = data + text
+ indextable.set(indices, extra_indices)
+ return data
- def toXML(self, writer, ttFont):
- names = sorted(self.glyphPrograms.keys())
- writer.newline()
- for name in names:
- text = self.glyphPrograms[name]
- if not text:
- continue
- writer.begintag("glyphProgram", name=name)
- writer.newline()
- writer.write_noindent(text.replace("\r", "\n"))
- writer.newline()
- writer.endtag("glyphProgram")
- writer.newline()
- writer.newline()
- extra_names = sorted(self.extraPrograms.keys())
- for name in extra_names:
- text = self.extraPrograms[name]
- if not text:
- continue
- writer.begintag("extraProgram", name=name)
- writer.newline()
- writer.write_noindent(text.replace("\r", "\n"))
- writer.newline()
- writer.endtag("extraProgram")
- writer.newline()
- writer.newline()
+ def toXML(self, writer, ttFont):
+ names = sorted(self.glyphPrograms.keys())
+ writer.newline()
+ for name in names:
+ text = self.glyphPrograms[name]
+ if not text:
+ continue
+ writer.begintag("glyphProgram", name=name)
+ writer.newline()
+ writer.write_noindent(text.replace("\r", "\n"))
+ writer.newline()
+ writer.endtag("glyphProgram")
+ writer.newline()
+ writer.newline()
+ extra_names = sorted(self.extraPrograms.keys())
+ for name in extra_names:
+ text = self.extraPrograms[name]
+ if not text:
+ continue
+ writer.begintag("extraProgram", name=name)
+ writer.newline()
+ writer.write_noindent(text.replace("\r", "\n"))
+ writer.newline()
+ writer.endtag("extraProgram")
+ writer.newline()
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "glyphPrograms"):
- self.glyphPrograms = {}
- self.extraPrograms = {}
- lines = strjoin(content).replace("\r", "\n").split("\n")
- text = '\r'.join(lines[1:-1])
- if name == "glyphProgram":
- self.glyphPrograms[attrs["name"]] = text
- elif name == "extraProgram":
- self.extraPrograms[attrs["name"]] = text
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "glyphPrograms"):
+ self.glyphPrograms = {}
+ self.extraPrograms = {}
+ lines = strjoin(content).replace("\r", "\n").split("\n")
+ text = "\r".join(lines[1:-1])
+ if name == "glyphProgram":
+ self.glyphPrograms[attrs["name"]] = text
+ elif name == "extraProgram":
+ self.extraPrograms[attrs["name"]] = text
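Annotation: the TSI1 decompile logic above applies a length rule: a stored textLength below 0x8000 is taken literally, while exactly 0x8000 means "compute the real length from the next record's textOffset" (or from the end of the table for the final record). A minimal sketch of that rule, not the fontTools API; the helper name actual_length is made up:

def actual_length(textLength, textOffset, nextTextOffset):
    if textLength < 0x8000:
        return textLength
    if textLength == 0x8000:
        # length runs up to the next record's offset
        return nextTextOffset - textOffset
    raise ValueError("textLength must not be > 32768")

assert actual_length(10, 100, 200) == 10
assert actual_length(0x8000, 100, 34000) == 33900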
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__2.py b/Lib/fontTools/ttLib/tables/T_S_I__2.py
index 036c9815..4278be15 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__2.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__2.py
@@ -9,6 +9,6 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("TSI0")
-class table_T_S_I__2(superclass):
- dependencies = ["TSI3"]
+class table_T_S_I__2(superclass):
+ dependencies = ["TSI3"]
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__3.py b/Lib/fontTools/ttLib/tables/T_S_I__3.py
index a2490142..785ca231 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__3.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__3.py
@@ -7,8 +7,13 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("TSI1")
-class table_T_S_I__3(superclass):
- extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"}
+class table_T_S_I__3(superclass):
+ extras = {
+ 0xFFFA: "reserved0",
+ 0xFFFB: "reserved1",
+ 0xFFFC: "reserved2",
+ 0xFFFD: "reserved3",
+ }
- indextable = "TSI2"
+ indextable = "TSI2"
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__5.py b/Lib/fontTools/ttLib/tables/T_S_I__5.py
index 7be09f9a..5edc86a9 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__5.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__5.py
@@ -10,34 +10,37 @@ import array
class table_T_S_I__5(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ assert len(data) == 2 * numGlyphs
+ a = array.array("H")
+ a.frombytes(data)
+ if sys.byteorder != "big":
+ a.byteswap()
+ self.glyphGrouping = {}
+ for i in range(numGlyphs):
+ self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- assert len(data) == 2 * numGlyphs
- a = array.array("H")
- a.frombytes(data)
- if sys.byteorder != "big": a.byteswap()
- self.glyphGrouping = {}
- for i in range(numGlyphs):
- self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]
+ def compile(self, ttFont):
+ glyphNames = ttFont.getGlyphOrder()
+ a = array.array("H")
+ for i in range(len(glyphNames)):
+ a.append(self.glyphGrouping.get(glyphNames[i], 0))
+ if sys.byteorder != "big":
+ a.byteswap()
+ return a.tobytes()
- def compile(self, ttFont):
- glyphNames = ttFont.getGlyphOrder()
- a = array.array("H")
- for i in range(len(glyphNames)):
- a.append(self.glyphGrouping.get(glyphNames[i], 0))
- if sys.byteorder != "big": a.byteswap()
- return a.tobytes()
+ def toXML(self, writer, ttFont):
+ names = sorted(self.glyphGrouping.keys())
+ for glyphName in names:
+ writer.simpletag(
+ "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]
+ )
+ writer.newline()
- def toXML(self, writer, ttFont):
- names = sorted(self.glyphGrouping.keys())
- for glyphName in names:
- writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName])
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "glyphGrouping"):
- self.glyphGrouping = {}
- if name != "glyphgroup":
- return
- self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "glyphGrouping"):
+ self.glyphGrouping = {}
+ if name != "glyphgroup":
+ return
+ self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
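Annotation: the TSI5 compile method above emits one big-endian uint16 group value per glyph, in glyph order. A minimal sketch of that payload with made-up group values:

import array
import sys

groups = [0, 3, 3, 7]  # one entry per glyph, in glyph order
a = array.array("H", groups)
if sys.byteorder != "big":
    a.byteswap()  # table data is big-endian
data = a.tobytes()
assert len(data) == 2 * len(groups)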
diff --git a/Lib/fontTools/ttLib/tables/T_T_F_A_.py b/Lib/fontTools/ttLib/tables/T_T_F_A_.py
index 8446dfc5..e3cf2db2 100644
--- a/Lib/fontTools/ttLib/tables/T_T_F_A_.py
+++ b/Lib/fontTools/ttLib/tables/T_T_F_A_.py
@@ -1,4 +1,5 @@
from . import asciiTable
+
class table_T_T_F_A_(asciiTable.asciiTable):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/TupleVariation.py b/Lib/fontTools/ttLib/tables/TupleVariation.py
index 9c2895e4..30d00990 100644
--- a/Lib/fontTools/ttLib/tables/TupleVariation.py
+++ b/Lib/fontTools/ttLib/tables/TupleVariation.py
@@ -22,751 +22,787 @@ PRIVATE_POINT_NUMBERS = 0x2000
DELTAS_ARE_ZERO = 0x80
DELTAS_ARE_WORDS = 0x40
-DELTA_RUN_COUNT_MASK = 0x3f
+DELTA_RUN_COUNT_MASK = 0x3F
POINTS_ARE_WORDS = 0x80
-POINT_RUN_COUNT_MASK = 0x7f
+POINT_RUN_COUNT_MASK = 0x7F
TUPLES_SHARE_POINT_NUMBERS = 0x8000
-TUPLE_COUNT_MASK = 0x0fff
-TUPLE_INDEX_MASK = 0x0fff
+TUPLE_COUNT_MASK = 0x0FFF
+TUPLE_INDEX_MASK = 0x0FFF
log = logging.getLogger(__name__)
class TupleVariation(object):
-
- def __init__(self, axes, coordinates):
- self.axes = axes.copy()
- self.coordinates = list(coordinates)
-
- def __repr__(self):
- axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()]))
- return "<TupleVariation %s %s>" % (axes, self.coordinates)
-
- def __eq__(self, other):
- return self.coordinates == other.coordinates and self.axes == other.axes
-
- def getUsedPoints(self):
- # Empty set means "all points used".
- if None not in self.coordinates:
- return frozenset()
- used = frozenset([i for i,p in enumerate(self.coordinates) if p is not None])
- # Return None if no points used.
- return used if used else None
-
- def hasImpact(self):
- """Returns True if this TupleVariation has any visible impact.
-
- If the result is False, the TupleVariation can be omitted from the font
- without making any visible difference.
- """
- return any(c is not None for c in self.coordinates)
-
- def toXML(self, writer, axisTags):
- writer.begintag("tuple")
- writer.newline()
- for axis in axisTags:
- value = self.axes.get(axis)
- if value is not None:
- minValue, value, maxValue = value
- defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- if minValue == defaultMinValue and maxValue == defaultMaxValue:
- writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
- else:
- attrs = [
- ("axis", axis),
- ("min", fl2str(minValue, 14)),
- ("value", fl2str(value, 14)),
- ("max", fl2str(maxValue, 14)),
- ]
- writer.simpletag("coord", attrs)
- writer.newline()
- wrote_any_deltas = False
- for i, delta in enumerate(self.coordinates):
- if type(delta) == tuple and len(delta) == 2:
- writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
- writer.newline()
- wrote_any_deltas = True
- elif type(delta) == int:
- writer.simpletag("delta", cvt=i, value=delta)
- writer.newline()
- wrote_any_deltas = True
- elif delta is not None:
- log.error("bad delta format")
- writer.comment("bad delta #%d" % i)
- writer.newline()
- wrote_any_deltas = True
- if not wrote_any_deltas:
- writer.comment("no deltas")
- writer.newline()
- writer.endtag("tuple")
- writer.newline()
-
- def fromXML(self, name, attrs, _content):
- if name == "coord":
- axis = attrs["axis"]
- value = str2fl(attrs["value"], 14)
- defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- minValue = str2fl(attrs.get("min", defaultMinValue), 14)
- maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
- self.axes[axis] = (minValue, value, maxValue)
- elif name == "delta":
- if "pt" in attrs:
- point = safeEval(attrs["pt"])
- x = safeEval(attrs["x"])
- y = safeEval(attrs["y"])
- self.coordinates[point] = (x, y)
- elif "cvt" in attrs:
- cvt = safeEval(attrs["cvt"])
- value = safeEval(attrs["value"])
- self.coordinates[cvt] = value
- else:
- log.warning("bad delta format: %s" %
- ", ".join(sorted(attrs.keys())))
-
- def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
- assert set(self.axes.keys()) <= set(axisTags), ("Unknown axis tag found.", self.axes.keys(), axisTags)
-
- tupleData = []
- auxData = []
-
- if pointData is None:
- usedPoints = self.getUsedPoints()
- if usedPoints is None: # Nothing to encode
- return b'', b''
- pointData = self.compilePoints(usedPoints)
-
- coord = self.compileCoord(axisTags)
- flags = sharedCoordIndices.get(coord)
- if flags is None:
- flags = EMBEDDED_PEAK_TUPLE
- tupleData.append(coord)
-
- intermediateCoord = self.compileIntermediateCoord(axisTags)
- if intermediateCoord is not None:
- flags |= INTERMEDIATE_REGION
- tupleData.append(intermediateCoord)
-
- # pointData of b'' implies "use shared points".
- if pointData:
- flags |= PRIVATE_POINT_NUMBERS
- auxData.append(pointData)
-
- auxData.append(self.compileDeltas())
- auxData = b''.join(auxData)
-
- tupleData.insert(0, struct.pack('>HH', len(auxData), flags))
- return b''.join(tupleData), auxData
-
- def compileCoord(self, axisTags):
- result = bytearray()
- axes = self.axes
- for axis in axisTags:
- triple = axes.get(axis)
- if triple is None:
- result.extend(b'\0\0')
- else:
- result.extend(struct.pack(">h", fl2fi(triple[1], 14)))
- return bytes(result)
-
- def compileIntermediateCoord(self, axisTags):
- needed = False
- for axis in axisTags:
- minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
- defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
- needed = True
- break
- if not needed:
- return None
- minCoords = bytearray()
- maxCoords = bytearray()
- for axis in axisTags:
- minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
- minCoords.extend(struct.pack(">h", fl2fi(minValue, 14)))
- maxCoords.extend(struct.pack(">h", fl2fi(maxValue, 14)))
- return minCoords + maxCoords
-
- @staticmethod
- def decompileCoord_(axisTags, data, offset):
- coord = {}
- pos = offset
- for axis in axisTags:
- coord[axis] = fi2fl(struct.unpack(">h", data[pos:pos+2])[0], 14)
- pos += 2
- return coord, pos
-
- @staticmethod
- def compilePoints(points):
- # If the set consists of all points in the glyph, it gets encoded with
- # a special encoding: a single zero byte.
- #
- # To use this optimization, points passed in must be empty set.
- # The following two lines are not strictly necessary as the main code
- # below would emit the same. But this is most common and faster.
- if not points:
- return b'\0'
-
- # In the 'gvar' table, the packing of point numbers is a little surprising.
- # It consists of multiple runs, each being a delta-encoded list of integers.
- # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
- # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
- # There are two types of runs, with values being either 8 or 16 bit unsigned
- # integers.
- points = list(points)
- points.sort()
- numPoints = len(points)
-
- result = bytearray()
- # The binary representation starts with the total number of points in the set,
- # encoded into one or two bytes depending on the value.
- if numPoints < 0x80:
- result.append(numPoints)
- else:
- result.append((numPoints >> 8) | 0x80)
- result.append(numPoints & 0xff)
-
- MAX_RUN_LENGTH = 127
- pos = 0
- lastValue = 0
- while pos < numPoints:
- runLength = 0
-
- headerPos = len(result)
- result.append(0)
-
- useByteEncoding = None
- while pos < numPoints and runLength <= MAX_RUN_LENGTH:
- curValue = points[pos]
- delta = curValue - lastValue
- if useByteEncoding is None:
- useByteEncoding = 0 <= delta <= 0xff
- if useByteEncoding and (delta > 0xff or delta < 0):
- # we need to start a new run (which will not use byte encoding)
- break
- # TODO This never switches back to a byte-encoding from a short-encoding.
- # That's suboptimal.
- if useByteEncoding:
- result.append(delta)
- else:
- result.append(delta >> 8)
- result.append(delta & 0xff)
- lastValue = curValue
- pos += 1
- runLength += 1
- if useByteEncoding:
- result[headerPos] = runLength - 1
- else:
- result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS
-
- return result
-
- @staticmethod
- def decompilePoints_(numPoints, data, offset, tableTag):
- """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
- assert tableTag in ('cvar', 'gvar')
- pos = offset
- numPointsInData = data[pos]
- pos += 1
- if (numPointsInData & POINTS_ARE_WORDS) != 0:
- numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
- pos += 1
- if numPointsInData == 0:
- return (range(numPoints), pos)
-
- result = []
- while len(result) < numPointsInData:
- runHeader = data[pos]
- pos += 1
- numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
- point = 0
- if (runHeader & POINTS_ARE_WORDS) != 0:
- points = array.array("H")
- pointsSize = numPointsInRun * 2
- else:
- points = array.array("B")
- pointsSize = numPointsInRun
- points.frombytes(data[pos:pos+pointsSize])
- if sys.byteorder != "big": points.byteswap()
-
- assert len(points) == numPointsInRun
- pos += pointsSize
-
- result.extend(points)
-
- # Convert relative to absolute
- absolute = []
- current = 0
- for delta in result:
- current += delta
- absolute.append(current)
- result = absolute
- del absolute
-
- badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
- if badPoints:
- log.warning("point %s out of range in '%s' table" %
- (",".join(sorted(badPoints)), tableTag))
- return (result, pos)
-
- def compileDeltas(self):
- deltaX = []
- deltaY = []
- if self.getCoordWidth() == 2:
- for c in self.coordinates:
- if c is None:
- continue
- deltaX.append(c[0])
- deltaY.append(c[1])
- else:
- for c in self.coordinates:
- if c is None:
- continue
- deltaX.append(c)
- bytearr = bytearray()
- self.compileDeltaValues_(deltaX, bytearr)
- self.compileDeltaValues_(deltaY, bytearr)
- return bytearr
-
- @staticmethod
- def compileDeltaValues_(deltas, bytearr=None):
- """[value1, value2, value3, ...] --> bytearray
-
- Emits a sequence of runs. Each run starts with a
- byte-sized header whose 6 least significant bits
- (header & 0x3F) indicate how many values are encoded
- in this run. The stored length is the actual length
- minus one; run lengths are thus in the range [1..64].
- If the header byte has its most significant bit (0x80)
- set, all values in this run are zero, and no data
- follows. Otherwise, the header byte is followed by
- ((header & 0x3F) + 1) signed values. If (header &
- 0x40) is clear, the delta values are stored as signed
- bytes; if (header & 0x40) is set, the delta values are
- signed 16-bit integers.
- """ # Explaining the format because the 'gvar' spec is hard to understand.
- if bytearr is None:
- bytearr = bytearray()
- pos = 0
- numDeltas = len(deltas)
- while pos < numDeltas:
- value = deltas[pos]
- if value == 0:
- pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
- elif -128 <= value <= 127:
- pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
- else:
- pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
- return bytearr
-
- @staticmethod
- def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
- pos = offset
- numDeltas = len(deltas)
- while pos < numDeltas and deltas[pos] == 0:
- pos += 1
- runLength = pos - offset
- while runLength >= 64:
- bytearr.append(DELTAS_ARE_ZERO | 63)
- runLength -= 64
- if runLength:
- bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
- return pos
-
- @staticmethod
- def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
- pos = offset
- numDeltas = len(deltas)
- while pos < numDeltas:
- value = deltas[pos]
- if not (-128 <= value <= 127):
- break
- # Within a byte-encoded run of deltas, a single zero
- # is best stored literally as 0x00 value. However,
- # if are two or more zeroes in a sequence, it is
- # better to start a new run. For example, the sequence
- # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
- # (04 0F 0F 00 0F 0F) when storing the zero value
- # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
- # when starting a new run.
- if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0:
- break
- pos += 1
- runLength = pos - offset
- while runLength >= 64:
- bytearr.append(63)
- bytearr.extend(array.array('b', deltas[offset:offset+64]))
- offset += 64
- runLength -= 64
- if runLength:
- bytearr.append(runLength - 1)
- bytearr.extend(array.array('b', deltas[offset:pos]))
- return pos
-
- @staticmethod
- def encodeDeltaRunAsWords_(deltas, offset, bytearr):
- pos = offset
- numDeltas = len(deltas)
- while pos < numDeltas:
- value = deltas[pos]
- # Within a word-encoded run of deltas, it is easiest
- # to start a new run (with a different encoding)
- # whenever we encounter a zero value. For example,
- # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
- # storing the zero literally (42 66 66 00 00 77 77),
- # and equally 7 bytes when starting a new run
- # (40 66 66 80 40 77 77).
- if value == 0:
- break
-
- # Within a word-encoded run of deltas, a single value
- # in the range (-128..127) should be encoded literally
- # because it is more compact. For example, the sequence
- # [0x6666, 2, 0x7777] becomes 7 bytes when storing
- # the value literally (42 66 66 00 02 77 77), but 8 bytes
- # when starting a new run (40 66 66 00 02 40 77 77).
- if (-128 <= value <= 127) and pos+1 < numDeltas and (-128 <= deltas[pos+1] <= 127):
- break
- pos += 1
- runLength = pos - offset
- while runLength >= 64:
- bytearr.append(DELTAS_ARE_WORDS | 63)
- a = array.array('h', deltas[offset:offset+64])
- if sys.byteorder != "big": a.byteswap()
- bytearr.extend(a)
- offset += 64
- runLength -= 64
- if runLength:
- bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
- a = array.array('h', deltas[offset:pos])
- if sys.byteorder != "big": a.byteswap()
- bytearr.extend(a)
- return pos
-
- @staticmethod
- def decompileDeltas_(numDeltas, data, offset):
- """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
- result = []
- pos = offset
- while len(result) < numDeltas:
- runHeader = data[pos]
- pos += 1
- numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
- if (runHeader & DELTAS_ARE_ZERO) != 0:
- result.extend([0] * numDeltasInRun)
- else:
- if (runHeader & DELTAS_ARE_WORDS) != 0:
- deltas = array.array("h")
- deltasSize = numDeltasInRun * 2
- else:
- deltas = array.array("b")
- deltasSize = numDeltasInRun
- deltas.frombytes(data[pos:pos+deltasSize])
- if sys.byteorder != "big": deltas.byteswap()
- assert len(deltas) == numDeltasInRun
- pos += deltasSize
- result.extend(deltas)
- assert len(result) == numDeltas
- return (result, pos)
-
- @staticmethod
- def getTupleSize_(flags, axisCount):
- size = 4
- if (flags & EMBEDDED_PEAK_TUPLE) != 0:
- size += axisCount * 2
- if (flags & INTERMEDIATE_REGION) != 0:
- size += axisCount * 4
- return size
-
- def getCoordWidth(self):
- """ Return 2 if coordinates are (x, y) as in gvar, 1 if single values
- as in cvar, or 0 if empty.
- """
- firstDelta = next((c for c in self.coordinates if c is not None), None)
- if firstDelta is None:
- return 0 # empty or has no impact
- if type(firstDelta) in (int, float):
- return 1
- if type(firstDelta) is tuple and len(firstDelta) == 2:
- return 2
- raise TypeError(
- "invalid type of delta; expected (int or float) number, or "
- "Tuple[number, number]: %r" % firstDelta
- )
-
- def scaleDeltas(self, scalar):
- if scalar == 1.0:
- return # no change
- coordWidth = self.getCoordWidth()
- self.coordinates = [
- None
- if d is None
- else d * scalar
- if coordWidth == 1
- else (d[0] * scalar, d[1] * scalar)
- for d in self.coordinates
- ]
-
- def roundDeltas(self):
- coordWidth = self.getCoordWidth()
- self.coordinates = [
- None
- if d is None
- else otRound(d)
- if coordWidth == 1
- else (otRound(d[0]), otRound(d[1]))
- for d in self.coordinates
- ]
-
- def calcInferredDeltas(self, origCoords, endPts):
- from fontTools.varLib.iup import iup_delta
-
- if self.getCoordWidth() == 1:
- raise TypeError(
- "Only 'gvar' TupleVariation can have inferred deltas"
- )
- if None in self.coordinates:
- if len(self.coordinates) != len(origCoords):
- raise ValueError(
- "Expected len(origCoords) == %d; found %d"
- % (len(self.coordinates), len(origCoords))
- )
- self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
-
- def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
- from fontTools.varLib.iup import iup_delta_optimize
-
- if None in self.coordinates:
- return # already optimized
-
- deltaOpt = iup_delta_optimize(
- self.coordinates, origCoords, endPts, tolerance=tolerance
- )
- if None in deltaOpt:
- if isComposite and all(d is None for d in deltaOpt):
- # Fix for macOS composites
- # https://github.com/fonttools/fonttools/issues/1381
- deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
- # Use "optimized" version only if smaller...
- varOpt = TupleVariation(self.axes, deltaOpt)
-
- # Shouldn't matter that this is different from fvar...?
- axisTags = sorted(self.axes.keys())
- tupleData, auxData = self.compile(axisTags)
- unoptimizedLength = len(tupleData) + len(auxData)
- tupleData, auxData = varOpt.compile(axisTags)
- optimizedLength = len(tupleData) + len(auxData)
-
- if optimizedLength < unoptimizedLength:
- self.coordinates = varOpt.coordinates
-
- def __iadd__(self, other):
- if not isinstance(other, TupleVariation):
- return NotImplemented
- deltas1 = self.coordinates
- length = len(deltas1)
- deltas2 = other.coordinates
- if len(deltas2) != length:
- raise ValueError(
- "cannot sum TupleVariation deltas with different lengths"
- )
- # 'None' values have different meanings in gvar vs cvar TupleVariations:
- # within the gvar, when deltas are not provided explicitly for some points,
- # they need to be inferred; whereas for the 'cvar' table, if deltas are not
- # provided for some CVT values, then no adjustments are made (i.e. None == 0).
- # Thus, we cannot sum deltas for gvar TupleVariations if they contain
- # inferred inferred deltas (the latter need to be computed first using
- # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
- # deltas as if they are zeros.
- if self.getCoordWidth() == 2:
- for i, d2 in zip(range(length), deltas2):
- d1 = deltas1[i]
- try:
- deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
- except TypeError:
- raise ValueError(
- "cannot sum gvar deltas with inferred points"
- )
- else:
- for i, d2 in zip(range(length), deltas2):
- d1 = deltas1[i]
- if d1 is not None and d2 is not None:
- deltas1[i] = d1 + d2
- elif d1 is None and d2 is not None:
- deltas1[i] = d2
- # elif d2 is None do nothing
- return self
+ def __init__(self, axes, coordinates):
+ self.axes = axes.copy()
+ self.coordinates = list(coordinates)
+
+ def __repr__(self):
+ axes = ",".join(
+ sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])
+ )
+ return "<TupleVariation %s %s>" % (axes, self.coordinates)
+
+ def __eq__(self, other):
+ return self.coordinates == other.coordinates and self.axes == other.axes
+
+ def getUsedPoints(self):
+ # Empty set means "all points used".
+ if None not in self.coordinates:
+ return frozenset()
+ used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None])
+ # Return None if no points used.
+ return used if used else None
+
+ def hasImpact(self):
+ """Returns True if this TupleVariation has any visible impact.
+
+ If the result is False, the TupleVariation can be omitted from the font
+ without making any visible difference.
+ """
+ return any(c is not None for c in self.coordinates)
+
+ def toXML(self, writer, axisTags):
+ writer.begintag("tuple")
+ writer.newline()
+ for axis in axisTags:
+ value = self.axes.get(axis)
+ if value is not None:
+ minValue, value, maxValue = value
+ defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ if minValue == defaultMinValue and maxValue == defaultMaxValue:
+ writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
+ else:
+ attrs = [
+ ("axis", axis),
+ ("min", fl2str(minValue, 14)),
+ ("value", fl2str(value, 14)),
+ ("max", fl2str(maxValue, 14)),
+ ]
+ writer.simpletag("coord", attrs)
+ writer.newline()
+ wrote_any_deltas = False
+ for i, delta in enumerate(self.coordinates):
+ if type(delta) == tuple and len(delta) == 2:
+ writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
+ writer.newline()
+ wrote_any_deltas = True
+ elif type(delta) == int:
+ writer.simpletag("delta", cvt=i, value=delta)
+ writer.newline()
+ wrote_any_deltas = True
+ elif delta is not None:
+ log.error("bad delta format")
+ writer.comment("bad delta #%d" % i)
+ writer.newline()
+ wrote_any_deltas = True
+ if not wrote_any_deltas:
+ writer.comment("no deltas")
+ writer.newline()
+ writer.endtag("tuple")
+ writer.newline()
+
+ def fromXML(self, name, attrs, _content):
+ if name == "coord":
+ axis = attrs["axis"]
+ value = str2fl(attrs["value"], 14)
+ defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ minValue = str2fl(attrs.get("min", defaultMinValue), 14)
+ maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
+ self.axes[axis] = (minValue, value, maxValue)
+ elif name == "delta":
+ if "pt" in attrs:
+ point = safeEval(attrs["pt"])
+ x = safeEval(attrs["x"])
+ y = safeEval(attrs["y"])
+ self.coordinates[point] = (x, y)
+ elif "cvt" in attrs:
+ cvt = safeEval(attrs["cvt"])
+ value = safeEval(attrs["value"])
+ self.coordinates[cvt] = value
+ else:
+ log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys())))
+
+ def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
+ assert set(self.axes.keys()) <= set(axisTags), (
+ "Unknown axis tag found.",
+ self.axes.keys(),
+ axisTags,
+ )
+
+ tupleData = []
+ auxData = []
+
+ if pointData is None:
+ usedPoints = self.getUsedPoints()
+ if usedPoints is None: # Nothing to encode
+ return b"", b""
+ pointData = self.compilePoints(usedPoints)
+
+ coord = self.compileCoord(axisTags)
+ flags = sharedCoordIndices.get(coord)
+ if flags is None:
+ flags = EMBEDDED_PEAK_TUPLE
+ tupleData.append(coord)
+
+ intermediateCoord = self.compileIntermediateCoord(axisTags)
+ if intermediateCoord is not None:
+ flags |= INTERMEDIATE_REGION
+ tupleData.append(intermediateCoord)
+
+ # pointData of b'' implies "use shared points".
+ if pointData:
+ flags |= PRIVATE_POINT_NUMBERS
+ auxData.append(pointData)
+
+ auxData.append(self.compileDeltas())
+ auxData = b"".join(auxData)
+
+ tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
+ return b"".join(tupleData), auxData
+
+ def compileCoord(self, axisTags):
+ result = []
+ axes = self.axes
+ for axis in axisTags:
+ triple = axes.get(axis)
+ if triple is None:
+ result.append(b"\0\0")
+ else:
+ result.append(struct.pack(">h", fl2fi(triple[1], 14)))
+ return b"".join(result)
+
+ def compileIntermediateCoord(self, axisTags):
+ needed = False
+ for axis in axisTags:
+ minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
+ defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
+ needed = True
+ break
+ if not needed:
+ return None
+ minCoords = []
+ maxCoords = []
+ for axis in axisTags:
+ minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
+ minCoords.append(struct.pack(">h", fl2fi(minValue, 14)))
+ maxCoords.append(struct.pack(">h", fl2fi(maxValue, 14)))
+ return b"".join(minCoords + maxCoords)
+
+ @staticmethod
+ def decompileCoord_(axisTags, data, offset):
+ coord = {}
+ pos = offset
+ for axis in axisTags:
+ coord[axis] = fi2fl(struct.unpack(">h", data[pos : pos + 2])[0], 14)
+ pos += 2
+ return coord, pos
+
+ @staticmethod
+ def compilePoints(points):
+ # If the set consists of all points in the glyph, it gets encoded with
+ # a special encoding: a single zero byte.
+ #
+        # To use this optimization, the points passed in must be an empty set.
+ # The following two lines are not strictly necessary as the main code
+ # below would emit the same. But this is most common and faster.
+ if not points:
+ return b"\0"
+
+ # In the 'gvar' table, the packing of point numbers is a little surprising.
+ # It consists of multiple runs, each being a delta-encoded list of integers.
+ # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
+ # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
+ # There are two types of runs, with values being either 8 or 16 bit unsigned
+ # integers.
+ points = list(points)
+ points.sort()
+ numPoints = len(points)
+
+ result = bytearray()
+ # The binary representation starts with the total number of points in the set,
+ # encoded into one or two bytes depending on the value.
+ if numPoints < 0x80:
+ result.append(numPoints)
+ else:
+ result.append((numPoints >> 8) | 0x80)
+ result.append(numPoints & 0xFF)
+
+ MAX_RUN_LENGTH = 127
+ pos = 0
+ lastValue = 0
+ while pos < numPoints:
+ runLength = 0
+
+ headerPos = len(result)
+ result.append(0)
+
+ useByteEncoding = None
+ while pos < numPoints and runLength <= MAX_RUN_LENGTH:
+ curValue = points[pos]
+ delta = curValue - lastValue
+ if useByteEncoding is None:
+ useByteEncoding = 0 <= delta <= 0xFF
+ if useByteEncoding and (delta > 0xFF or delta < 0):
+ # we need to start a new run (which will not use byte encoding)
+ break
+ # TODO This never switches back to a byte-encoding from a short-encoding.
+ # That's suboptimal.
+ if useByteEncoding:
+ result.append(delta)
+ else:
+ result.append(delta >> 8)
+ result.append(delta & 0xFF)
+ lastValue = curValue
+ pos += 1
+ runLength += 1
+ if useByteEncoding:
+ result[headerPos] = runLength - 1
+ else:
+ result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS
+
+ return result
+
+ @staticmethod
+ def decompilePoints_(numPoints, data, offset, tableTag):
+ """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
+ assert tableTag in ("cvar", "gvar")
+ pos = offset
+ numPointsInData = data[pos]
+ pos += 1
+ if (numPointsInData & POINTS_ARE_WORDS) != 0:
+ numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
+ pos += 1
+ if numPointsInData == 0:
+ return (range(numPoints), pos)
+
+ result = []
+ while len(result) < numPointsInData:
+ runHeader = data[pos]
+ pos += 1
+ numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
+ point = 0
+ if (runHeader & POINTS_ARE_WORDS) != 0:
+ points = array.array("H")
+ pointsSize = numPointsInRun * 2
+ else:
+ points = array.array("B")
+ pointsSize = numPointsInRun
+ points.frombytes(data[pos : pos + pointsSize])
+ if sys.byteorder != "big":
+ points.byteswap()
+
+ assert len(points) == numPointsInRun
+ pos += pointsSize
+
+ result.extend(points)
+
+ # Convert relative to absolute
+ absolute = []
+ current = 0
+ for delta in result:
+ current += delta
+ absolute.append(current)
+ result = absolute
+ del absolute
+
+ badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
+ if badPoints:
+ log.warning(
+ "point %s out of range in '%s' table"
+ % (",".join(sorted(badPoints)), tableTag)
+ )
+ return (result, pos)
+
+ def compileDeltas(self):
+ deltaX = []
+ deltaY = []
+ if self.getCoordWidth() == 2:
+ for c in self.coordinates:
+ if c is None:
+ continue
+ deltaX.append(c[0])
+ deltaY.append(c[1])
+ else:
+ for c in self.coordinates:
+ if c is None:
+ continue
+ deltaX.append(c)
+ bytearr = bytearray()
+ self.compileDeltaValues_(deltaX, bytearr)
+ self.compileDeltaValues_(deltaY, bytearr)
+ return bytearr
+
+ @staticmethod
+ def compileDeltaValues_(deltas, bytearr=None):
+ """[value1, value2, value3, ...] --> bytearray
+
+ Emits a sequence of runs. Each run starts with a
+ byte-sized header whose 6 least significant bits
+ (header & 0x3F) indicate how many values are encoded
+ in this run. The stored length is the actual length
+ minus one; run lengths are thus in the range [1..64].
+ If the header byte has its most significant bit (0x80)
+ set, all values in this run are zero, and no data
+ follows. Otherwise, the header byte is followed by
+ ((header & 0x3F) + 1) signed values. If (header &
+ 0x40) is clear, the delta values are stored as signed
+ bytes; if (header & 0x40) is set, the delta values are
+ signed 16-bit integers.
+ """ # Explaining the format because the 'gvar' spec is hard to understand.
+ if bytearr is None:
+ bytearr = bytearray()
+ pos = 0
+ numDeltas = len(deltas)
+ while pos < numDeltas:
+ value = deltas[pos]
+ if value == 0:
+ pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
+ elif -128 <= value <= 127:
+ pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
+ else:
+ pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
+ return bytearr
+
+ @staticmethod
+ def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
+ pos = offset
+ numDeltas = len(deltas)
+ while pos < numDeltas and deltas[pos] == 0:
+ pos += 1
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(DELTAS_ARE_ZERO | 63)
+ runLength -= 64
+ if runLength:
+ bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
+ return pos
+
+ @staticmethod
+ def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
+ pos = offset
+ numDeltas = len(deltas)
+ while pos < numDeltas:
+ value = deltas[pos]
+ if not (-128 <= value <= 127):
+ break
+ # Within a byte-encoded run of deltas, a single zero
+ # is best stored literally as 0x00 value. However,
+            # if there are two or more zeroes in a sequence, it is
+ # better to start a new run. For example, the sequence
+ # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
+ # (04 0F 0F 00 0F 0F) when storing the zero value
+ # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
+ # when starting a new run.
+ if value == 0 and pos + 1 < numDeltas and deltas[pos + 1] == 0:
+ break
+ pos += 1
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(63)
+ bytearr.extend(array.array("b", deltas[offset : offset + 64]))
+ offset += 64
+ runLength -= 64
+ if runLength:
+ bytearr.append(runLength - 1)
+ bytearr.extend(array.array("b", deltas[offset:pos]))
+ return pos
+
+ @staticmethod
+ def encodeDeltaRunAsWords_(deltas, offset, bytearr):
+ pos = offset
+ numDeltas = len(deltas)
+ while pos < numDeltas:
+ value = deltas[pos]
+ # Within a word-encoded run of deltas, it is easiest
+ # to start a new run (with a different encoding)
+ # whenever we encounter a zero value. For example,
+ # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
+ # storing the zero literally (42 66 66 00 00 77 77),
+ # and equally 7 bytes when starting a new run
+ # (40 66 66 80 40 77 77).
+ if value == 0:
+ break
+
+ # Within a word-encoded run of deltas, a single value
+ # in the range (-128..127) should be encoded literally
+ # because it is more compact. For example, the sequence
+ # [0x6666, 2, 0x7777] becomes 7 bytes when storing
+ # the value literally (42 66 66 00 02 77 77), but 8 bytes
+ # when starting a new run (40 66 66 00 02 40 77 77).
+ if (
+ (-128 <= value <= 127)
+ and pos + 1 < numDeltas
+ and (-128 <= deltas[pos + 1] <= 127)
+ ):
+ break
+ pos += 1
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(DELTAS_ARE_WORDS | 63)
+ a = array.array("h", deltas[offset : offset + 64])
+ if sys.byteorder != "big":
+ a.byteswap()
+ bytearr.extend(a)
+ offset += 64
+ runLength -= 64
+ if runLength:
+ bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
+ a = array.array("h", deltas[offset:pos])
+ if sys.byteorder != "big":
+ a.byteswap()
+ bytearr.extend(a)
+ return pos
+
+ @staticmethod
+ def decompileDeltas_(numDeltas, data, offset):
+ """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
+ result = []
+ pos = offset
+ while len(result) < numDeltas:
+ runHeader = data[pos]
+ pos += 1
+ numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
+ if (runHeader & DELTAS_ARE_ZERO) != 0:
+ result.extend([0] * numDeltasInRun)
+ else:
+ if (runHeader & DELTAS_ARE_WORDS) != 0:
+ deltas = array.array("h")
+ deltasSize = numDeltasInRun * 2
+ else:
+ deltas = array.array("b")
+ deltasSize = numDeltasInRun
+ deltas.frombytes(data[pos : pos + deltasSize])
+ if sys.byteorder != "big":
+ deltas.byteswap()
+ assert len(deltas) == numDeltasInRun
+ pos += deltasSize
+ result.extend(deltas)
+ assert len(result) == numDeltas
+ return (result, pos)
+
+ @staticmethod
+ def getTupleSize_(flags, axisCount):
+ size = 4
+ if (flags & EMBEDDED_PEAK_TUPLE) != 0:
+ size += axisCount * 2
+ if (flags & INTERMEDIATE_REGION) != 0:
+ size += axisCount * 4
+ return size
+
+ def getCoordWidth(self):
+ """Return 2 if coordinates are (x, y) as in gvar, 1 if single values
+ as in cvar, or 0 if empty.
+ """
+ firstDelta = next((c for c in self.coordinates if c is not None), None)
+ if firstDelta is None:
+ return 0 # empty or has no impact
+ if type(firstDelta) in (int, float):
+ return 1
+ if type(firstDelta) is tuple and len(firstDelta) == 2:
+ return 2
+ raise TypeError(
+ "invalid type of delta; expected (int or float) number, or "
+ "Tuple[number, number]: %r" % firstDelta
+ )
+
+ def scaleDeltas(self, scalar):
+ if scalar == 1.0:
+ return # no change
+ coordWidth = self.getCoordWidth()
+ self.coordinates = [
+ None
+ if d is None
+ else d * scalar
+ if coordWidth == 1
+ else (d[0] * scalar, d[1] * scalar)
+ for d in self.coordinates
+ ]
+
+ def roundDeltas(self):
+ coordWidth = self.getCoordWidth()
+ self.coordinates = [
+ None
+ if d is None
+ else otRound(d)
+ if coordWidth == 1
+ else (otRound(d[0]), otRound(d[1]))
+ for d in self.coordinates
+ ]
+
+ def calcInferredDeltas(self, origCoords, endPts):
+ from fontTools.varLib.iup import iup_delta
+
+ if self.getCoordWidth() == 1:
+ raise TypeError("Only 'gvar' TupleVariation can have inferred deltas")
+ if None in self.coordinates:
+ if len(self.coordinates) != len(origCoords):
+ raise ValueError(
+ "Expected len(origCoords) == %d; found %d"
+ % (len(self.coordinates), len(origCoords))
+ )
+ self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
+
+ def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
+ from fontTools.varLib.iup import iup_delta_optimize
+
+ if None in self.coordinates:
+ return # already optimized
+
+ deltaOpt = iup_delta_optimize(
+ self.coordinates, origCoords, endPts, tolerance=tolerance
+ )
+ if None in deltaOpt:
+ if isComposite and all(d is None for d in deltaOpt):
+ # Fix for macOS composites
+ # https://github.com/fonttools/fonttools/issues/1381
+ deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
+ # Use "optimized" version only if smaller...
+ varOpt = TupleVariation(self.axes, deltaOpt)
+
+ # Shouldn't matter that this is different from fvar...?
+ axisTags = sorted(self.axes.keys())
+ tupleData, auxData = self.compile(axisTags)
+ unoptimizedLength = len(tupleData) + len(auxData)
+ tupleData, auxData = varOpt.compile(axisTags)
+ optimizedLength = len(tupleData) + len(auxData)
+
+ if optimizedLength < unoptimizedLength:
+ self.coordinates = varOpt.coordinates
+
+ def __imul__(self, scalar):
+ self.scaleDeltas(scalar)
+ return self
+
+ def __iadd__(self, other):
+ if not isinstance(other, TupleVariation):
+ return NotImplemented
+ deltas1 = self.coordinates
+ length = len(deltas1)
+ deltas2 = other.coordinates
+ if len(deltas2) != length:
+ raise ValueError("cannot sum TupleVariation deltas with different lengths")
+ # 'None' values have different meanings in gvar vs cvar TupleVariations:
+ # within the gvar, when deltas are not provided explicitly for some points,
+ # they need to be inferred; whereas for the 'cvar' table, if deltas are not
+ # provided for some CVT values, then no adjustments are made (i.e. None == 0).
+ # Thus, we cannot sum deltas for gvar TupleVariations if they contain
+ # inferred deltas (the latter need to be computed first using
+ # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
+ # deltas as if they are zeros.
+ if self.getCoordWidth() == 2:
+ for i, d2 in zip(range(length), deltas2):
+ d1 = deltas1[i]
+ try:
+ deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
+ except TypeError:
+ raise ValueError("cannot sum gvar deltas with inferred points")
+ else:
+ for i, d2 in zip(range(length), deltas2):
+ d1 = deltas1[i]
+ if d1 is not None and d2 is not None:
+ deltas1[i] = d1 + d2
+ elif d1 is None and d2 is not None:
+ deltas1[i] = d2
+ # elif d2 is None do nothing
+ return self
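A small sketch of the summing rules described in the comment above; cvar-style scalars treat None as "no adjustment", while gvar-style deltas refuse to sum across inferred (None) points:

    a = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [1, None, 3, None])
    b = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [None, 2, 4, None])
    a += b
    assert a.coordinates == [1, 2, 7, None]

    g = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(1, 1), None])
    h = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(2, 2), (3, 3)])
    # g += h   # raises ValueError: cannot sum gvar deltas with inferred points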
def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
- result = []
- for _ in range(sharedTupleCount):
- t, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
- result.append(t)
- return result
-
-
-def compileSharedTuples(axisTags, variations,
- MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1):
- coordCount = Counter()
- for var in variations:
- coord = var.compileCoord(axisTags)
- coordCount[coord] += 1
- # In python < 3.7, most_common() ordering is non-deterministic
- # so apply a sort to make sure the ordering is consistent.
- sharedCoords = sorted(
- coordCount.most_common(MAX_NUM_SHARED_COORDS),
- key=lambda item: (-item[1], item[0]),
- )
- return [c[0] for c in sharedCoords if c[1] > 1]
-
-
-def compileTupleVariationStore(variations, pointCount,
- axisTags, sharedTupleIndices,
- useSharedPoints=True):
- newVariations = []
- pointDatas = []
- # Compile all points and figure out sharing if desired
- sharedPoints = None
-
- # Collect, count, and compile point-sets for all variation sets
- pointSetCount = defaultdict(int)
- for v in variations:
- points = v.getUsedPoints()
- if points is None: # Empty variations
- continue
- pointSetCount[points] += 1
- newVariations.append(v)
- pointDatas.append(points)
- variations = newVariations
- del newVariations
-
- if not variations:
- return (0, b"", b"")
-
- n = len(variations[0].coordinates)
- assert all(len(v.coordinates) == n for v in variations), "Variation sets have different sizes"
-
- compiledPoints = {pointSet:TupleVariation.compilePoints(pointSet)
- for pointSet in pointSetCount}
-
- tupleVariationCount = len(variations)
- tuples = []
- data = []
-
- if useSharedPoints:
- # Find point-set which saves most bytes.
- def key(pn):
- pointSet = pn[0]
- count = pn[1]
- return len(compiledPoints[pointSet]) * (count - 1)
- sharedPoints = max(pointSetCount.items(), key=key)[0]
-
- data.append(compiledPoints[sharedPoints])
- tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS
-
- # b'' implies "use shared points"
- pointDatas = [compiledPoints[points] if points != sharedPoints else b''
- for points in pointDatas]
-
- for v,p in zip(variations, pointDatas):
- thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)
-
- tuples.append(thisTuple)
- data.append(thisData)
-
- tuples = b''.join(tuples)
- data = b''.join(data)
- return tupleVariationCount, tuples, data
-
-
-def decompileTupleVariationStore(tableTag, axisTags,
- tupleVariationCount, pointCount, sharedTuples,
- data, pos, dataPos):
- numAxes = len(axisTags)
- result = []
- if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
- sharedPoints, dataPos = TupleVariation.decompilePoints_(
- pointCount, data, dataPos, tableTag)
- else:
- sharedPoints = []
- for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
- dataSize, flags = struct.unpack(">HH", data[pos:pos+4])
- tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
- tupleData = data[pos : pos + tupleSize]
- pointDeltaData = data[dataPos : dataPos + dataSize]
- result.append(decompileTupleVariation_(
- pointCount, sharedTuples, sharedPoints,
- tableTag, axisTags, tupleData, pointDeltaData))
- pos += tupleSize
- dataPos += dataSize
- return result
-
-
-def decompileTupleVariation_(pointCount, sharedTuples, sharedPoints,
- tableTag, axisTags, data, tupleData):
- assert tableTag in ("cvar", "gvar"), tableTag
- flags = struct.unpack(">H", data[2:4])[0]
- pos = 4
- if (flags & EMBEDDED_PEAK_TUPLE) == 0:
- peak = sharedTuples[flags & TUPLE_INDEX_MASK]
- else:
- peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
- if (flags & INTERMEDIATE_REGION) != 0:
- start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
- end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
- else:
- start, end = inferRegion_(peak)
- axes = {}
- for axis in axisTags:
- region = start[axis], peak[axis], end[axis]
- if region != (0.0, 0.0, 0.0):
- axes[axis] = region
- pos = 0
- if (flags & PRIVATE_POINT_NUMBERS) != 0:
- points, pos = TupleVariation.decompilePoints_(
- pointCount, tupleData, pos, tableTag)
- else:
- points = sharedPoints
-
- deltas = [None] * pointCount
-
- if tableTag == "cvar":
- deltas_cvt, pos = TupleVariation.decompileDeltas_(
- len(points), tupleData, pos)
- for p, delta in zip(points, deltas_cvt):
- if 0 <= p < pointCount:
- deltas[p] = delta
-
- elif tableTag == "gvar":
- deltas_x, pos = TupleVariation.decompileDeltas_(
- len(points), tupleData, pos)
- deltas_y, pos = TupleVariation.decompileDeltas_(
- len(points), tupleData, pos)
- for p, x, y in zip(points, deltas_x, deltas_y):
- if 0 <= p < pointCount:
- deltas[p] = (x, y)
-
- return TupleVariation(axes, deltas)
+ result = []
+ for _ in range(sharedTupleCount):
+ t, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
+ result.append(t)
+ return result
+
+
+def compileSharedTuples(
+ axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
+):
+ coordCount = Counter()
+ for var in variations:
+ coord = var.compileCoord(axisTags)
+ coordCount[coord] += 1
+ # In python < 3.7, most_common() ordering is non-deterministic
+ # so apply a sort to make sure the ordering is consistent.
+ sharedCoords = sorted(
+ coordCount.most_common(MAX_NUM_SHARED_COORDS),
+ key=lambda item: (-item[1], item[0]),
+ )
+ return [c[0] for c in sharedCoords if c[1] > 1]
+
+
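For example (a sketch; the exact bytes are whatever compileCoord produces, the point being that only peak tuples referenced by more than one variation end up shared):

    axisTags = ["wght", "wdth"]
    v1 = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(1, 0)])
    v2 = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(2, 0)])
    v3 = TupleVariation({"wdth": (0.0, 1.0, 1.0)}, [(3, 0)])
    shared = compileSharedTuples(axisTags, [v1, v2, v3])
    assert shared == [v1.compileCoord(axisTags)]   # the 'wght' peak, used twice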
+def compileTupleVariationStore(
+ variations, pointCount, axisTags, sharedTupleIndices, useSharedPoints=True
+):
+ # pointCount is actually unused. Keeping for API compat.
+ del pointCount
+ newVariations = []
+ pointDatas = []
+ # Compile all points and figure out sharing if desired
+ sharedPoints = None
+
+ # Collect, count, and compile point-sets for all variation sets
+ pointSetCount = defaultdict(int)
+ for v in variations:
+ points = v.getUsedPoints()
+ if points is None: # Empty variations
+ continue
+ pointSetCount[points] += 1
+ newVariations.append(v)
+ pointDatas.append(points)
+ variations = newVariations
+ del newVariations
+
+ if not variations:
+ return (0, b"", b"")
+
+ n = len(variations[0].coordinates)
+ assert all(
+ len(v.coordinates) == n for v in variations
+ ), "Variation sets have different sizes"
+
+ compiledPoints = {
+ pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount
+ }
+
+ tupleVariationCount = len(variations)
+ tuples = []
+ data = []
+
+ if useSharedPoints:
+ # Find point-set which saves most bytes.
+ def key(pn):
+ pointSet = pn[0]
+ count = pn[1]
+ return len(compiledPoints[pointSet]) * (count - 1)
+
+ sharedPoints = max(pointSetCount.items(), key=key)[0]
+
+ data.append(compiledPoints[sharedPoints])
+ tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS
+
+ # b'' implies "use shared points"
+ pointDatas = [
+ compiledPoints[points] if points != sharedPoints else b""
+ for points in pointDatas
+ ]
+
+ for v, p in zip(variations, pointDatas):
+ thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)
+
+ tuples.append(thisTuple)
+ data.append(thisData)
+
+ tuples = b"".join(tuples)
+ data = b"".join(data)
+ return tupleVariationCount, tuples, data
+
+
+def decompileTupleVariationStore(
+ tableTag,
+ axisTags,
+ tupleVariationCount,
+ pointCount,
+ sharedTuples,
+ data,
+ pos,
+ dataPos,
+):
+ numAxes = len(axisTags)
+ result = []
+ if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
+ sharedPoints, dataPos = TupleVariation.decompilePoints_(
+ pointCount, data, dataPos, tableTag
+ )
+ else:
+ sharedPoints = []
+ for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
+ dataSize, flags = struct.unpack(">HH", data[pos : pos + 4])
+ tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
+ tupleData = data[pos : pos + tupleSize]
+ pointDeltaData = data[dataPos : dataPos + dataSize]
+ result.append(
+ decompileTupleVariation_(
+ pointCount,
+ sharedTuples,
+ sharedPoints,
+ tableTag,
+ axisTags,
+ tupleData,
+ pointDeltaData,
+ )
+ )
+ pos += tupleSize
+ dataPos += dataSize
+ return result
+
+
+def decompileTupleVariation_(
+ pointCount, sharedTuples, sharedPoints, tableTag, axisTags, data, tupleData
+):
+ assert tableTag in ("cvar", "gvar"), tableTag
+ flags = struct.unpack(">H", data[2:4])[0]
+ pos = 4
+ if (flags & EMBEDDED_PEAK_TUPLE) == 0:
+ peak = sharedTuples[flags & TUPLE_INDEX_MASK]
+ else:
+ peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
+ if (flags & INTERMEDIATE_REGION) != 0:
+ start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
+ end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
+ else:
+ start, end = inferRegion_(peak)
+ axes = {}
+ for axis in axisTags:
+ region = start[axis], peak[axis], end[axis]
+ if region != (0.0, 0.0, 0.0):
+ axes[axis] = region
+ pos = 0
+ if (flags & PRIVATE_POINT_NUMBERS) != 0:
+ points, pos = TupleVariation.decompilePoints_(
+ pointCount, tupleData, pos, tableTag
+ )
+ else:
+ points = sharedPoints
+
+ deltas = [None] * pointCount
+
+ if tableTag == "cvar":
+ deltas_cvt, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
+ for p, delta in zip(points, deltas_cvt):
+ if 0 <= p < pointCount:
+ deltas[p] = delta
+
+ elif tableTag == "gvar":
+ deltas_x, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
+ deltas_y, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
+ for p, x, y in zip(points, deltas_x, deltas_y):
+ if 0 <= p < pointCount:
+ deltas[p] = (x, y)
+
+ return TupleVariation(axes, deltas)
def inferRegion_(peak):
- """Infer start and end for a (non-intermediate) region
-
- This helper function computes the applicability region for
- variation tuples whose INTERMEDIATE_REGION flag is not set in the
- TupleVariationHeader structure. Variation tuples apply only to
- certain regions of the variation space; outside that region, the
- tuple has no effect. To make the binary encoding more compact,
- TupleVariationHeaders can omit the intermediateStartTuple and
- intermediateEndTuple fields.
+ """Infer start and end for a (non-intermediate) region
+
+ This helper function computes the applicability region for
+ variation tuples whose INTERMEDIATE_REGION flag is not set in the
+ TupleVariationHeader structure. Variation tuples apply only to
+ certain regions of the variation space; outside that region, the
+ tuple has no effect. To make the binary encoding more compact,
+ TupleVariationHeaders can omit the intermediateStartTuple and
+ intermediateEndTuple fields.
"""
- start, end = {}, {}
- for (axis, value) in peak.items():
- start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- return (start, end)
+ start, end = {}, {}
+ for axis, value in peak.items():
+ start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ return (start, end)
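A minimal illustration of the inferred region, matching the inline comments:

    start, end = inferRegion_({"wght": 0.7, "wdth": -0.3})
    assert start == {"wght": 0.0, "wdth": -0.3}
    assert end == {"wght": 0.7, "wdth": 0.0}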
diff --git a/Lib/fontTools/ttLib/tables/V_D_M_X_.py b/Lib/fontTools/ttLib/tables/V_D_M_X_.py
index ba8593f1..0632173c 100644
--- a/Lib/fontTools/ttLib/tables/V_D_M_X_.py
+++ b/Lib/fontTools/ttLib/tables/V_D_M_X_.py
@@ -37,196 +37,205 @@ VDMX_vTableFmt = """
class table_V_D_M_X_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ pos = 0 # track current position from the start of the VDMX table
+ dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
+ pos += sstruct.calcsize(VDMX_HeaderFmt)
+ self.ratRanges = []
+ for i in range(self.numRatios):
+ ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
+ pos += sstruct.calcsize(VDMX_RatRangeFmt)
+ # the mapping between a ratio and a group is defined further below
+ ratio["groupIndex"] = None
+ self.ratRanges.append(ratio)
+ lenOffset = struct.calcsize(">H")
+ _offsets = [] # temporarily store offsets to groups
+ for i in range(self.numRatios):
+ offset = struct.unpack(">H", data[0:lenOffset])[0]
+ data = data[lenOffset:]
+ pos += lenOffset
+ _offsets.append(offset)
+ self.groups = []
+ for groupIndex in range(self.numRecs):
+ # the offset to this group from the beginning of the VDMX table
+ currOffset = pos
+ group, data = sstruct.unpack2(VDMX_GroupFmt, data)
+ # the group length and bounding sizes are re-calculated on compile
+ recs = group.pop("recs")
+ startsz = group.pop("startsz")
+ endsz = group.pop("endsz")
+ pos += sstruct.calcsize(VDMX_GroupFmt)
+ for j in range(recs):
+ vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
+ vTableLength = sstruct.calcsize(VDMX_vTableFmt)
+ pos += vTableLength
+ # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
+ group[vTable["yPelHeight"]] = (vTable["yMax"], vTable["yMin"])
+ # make sure startsz and endsz match the calculated values
+ minSize = min(group.keys())
+ maxSize = max(group.keys())
+ assert (
+ startsz == minSize
+ ), "startsz (%s) must equal min yPelHeight (%s): group %d" % (
+ startsz,
+ minSize,
+ groupIndex,
+ )
+ assert (
+ endsz == maxSize
+ ), "endsz (%s) must equal max yPelHeight (%s): group %d" % (
+ endsz,
+ maxSize,
+ groupIndex,
+ )
+ self.groups.append(group)
+ # match the defined offsets with the current group's offset
+ for offsetIndex, offsetValue in enumerate(_offsets):
+ # when numRecs < numRatios there can be more than one ratio range
+ # sharing the same VDMX group
+ if currOffset == offsetValue:
+ # map the group to the ratio range that has the same
+ # index as the offset to that group (it took me a while..)
+ self.ratRanges[offsetIndex]["groupIndex"] = groupIndex
+ # check that all ratio ranges have a group
+ for i in range(self.numRatios):
+ ratio = self.ratRanges[i]
+ if ratio["groupIndex"] is None:
+ from fontTools import ttLib
- def decompile(self, data, ttFont):
- pos = 0 # track current position from to start of VDMX table
- dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
- pos += sstruct.calcsize(VDMX_HeaderFmt)
- self.ratRanges = []
- for i in range(self.numRatios):
- ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
- pos += sstruct.calcsize(VDMX_RatRangeFmt)
- # the mapping between a ratio and a group is defined further below
- ratio['groupIndex'] = None
- self.ratRanges.append(ratio)
- lenOffset = struct.calcsize('>H')
- _offsets = [] # temporarily store offsets to groups
- for i in range(self.numRatios):
- offset = struct.unpack('>H', data[0:lenOffset])[0]
- data = data[lenOffset:]
- pos += lenOffset
- _offsets.append(offset)
- self.groups = []
- for groupIndex in range(self.numRecs):
- # the offset to this group from beginning of the VDMX table
- currOffset = pos
- group, data = sstruct.unpack2(VDMX_GroupFmt, data)
- # the group lenght and bounding sizes are re-calculated on compile
- recs = group.pop('recs')
- startsz = group.pop('startsz')
- endsz = group.pop('endsz')
- pos += sstruct.calcsize(VDMX_GroupFmt)
- for j in range(recs):
- vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
- vTableLength = sstruct.calcsize(VDMX_vTableFmt)
- pos += vTableLength
- # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
- group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin'])
- # make sure startsz and endsz match the calculated values
- minSize = min(group.keys())
- maxSize = max(group.keys())
- assert startsz == minSize, \
- "startsz (%s) must equal min yPelHeight (%s): group %d" % \
- (group.startsz, minSize, groupIndex)
- assert endsz == maxSize, \
- "endsz (%s) must equal max yPelHeight (%s): group %d" % \
- (group.endsz, maxSize, groupIndex)
- self.groups.append(group)
- # match the defined offsets with the current group's offset
- for offsetIndex, offsetValue in enumerate(_offsets):
- # when numRecs < numRatios there can more than one ratio range
- # sharing the same VDMX group
- if currOffset == offsetValue:
- # map the group with the ratio range thas has the same
- # index as the offset to that group (it took me a while..)
- self.ratRanges[offsetIndex]['groupIndex'] = groupIndex
- # check that all ratio ranges have a group
- for i in range(self.numRatios):
- ratio = self.ratRanges[i]
- if ratio['groupIndex'] is None:
- from fontTools import ttLib
- raise ttLib.TTLibError(
- "no group defined for ratRange %d" % i)
+ raise ttLib.TTLibError("no group defined for ratRange %d" % i)
- def _getOffsets(self):
- """
- Calculate offsets to VDMX_Group records.
- For each ratRange return a list of offset values from the beginning of
- the VDMX table to a VDMX_Group.
- """
- lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
- lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
- lenOffset = struct.calcsize('>H')
- lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
- lenVTable = sstruct.calcsize(VDMX_vTableFmt)
- # offset to the first group
- pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset
- groupOffsets = []
- for group in self.groups:
- groupOffsets.append(pos)
- lenGroup = lenGroupHeader + len(group) * lenVTable
- pos += lenGroup # offset to next group
- offsets = []
- for ratio in self.ratRanges:
- groupIndex = ratio['groupIndex']
- offsets.append(groupOffsets[groupIndex])
- return offsets
+ def _getOffsets(self):
+ """
+ Calculate offsets to VDMX_Group records.
+ For each ratRange return a list of offset values from the beginning of
+ the VDMX table to a VDMX_Group.
+ """
+ lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
+ lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
+ lenOffset = struct.calcsize(">H")
+ lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
+ lenVTable = sstruct.calcsize(VDMX_vTableFmt)
+ # offset to the first group
+ pos = lenHeader + self.numRatios * lenRatRange + self.numRatios * lenOffset
+ groupOffsets = []
+ for group in self.groups:
+ groupOffsets.append(pos)
+ lenGroup = lenGroupHeader + len(group) * lenVTable
+ pos += lenGroup # offset to next group
+ offsets = []
+ for ratio in self.ratRanges:
+ groupIndex = ratio["groupIndex"]
+ offsets.append(groupOffsets[groupIndex])
+ return offsets
- def compile(self, ttFont):
- if not(self.version == 0 or self.version == 1):
- from fontTools import ttLib
- raise ttLib.TTLibError(
- "unknown format for VDMX table: version %s" % self.version)
- data = sstruct.pack(VDMX_HeaderFmt, self)
- for ratio in self.ratRanges:
- data += sstruct.pack(VDMX_RatRangeFmt, ratio)
- # recalculate offsets to VDMX groups
- for offset in self._getOffsets():
- data += struct.pack('>H', offset)
- for group in self.groups:
- recs = len(group)
- startsz = min(group.keys())
- endsz = max(group.keys())
- gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz}
- data += sstruct.pack(VDMX_GroupFmt, gHeader)
- for yPelHeight, (yMax, yMin) in sorted(group.items()):
- vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin}
- data += sstruct.pack(VDMX_vTableFmt, vTable)
- return data
+ def compile(self, ttFont):
+ if not (self.version == 0 or self.version == 1):
+ from fontTools import ttLib
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.begintag("ratRanges")
- writer.newline()
- for ratio in self.ratRanges:
- groupIndex = ratio['groupIndex']
- writer.simpletag(
- "ratRange",
- bCharSet=ratio['bCharSet'],
- xRatio=ratio['xRatio'],
- yStartRatio=ratio['yStartRatio'],
- yEndRatio=ratio['yEndRatio'],
- groupIndex=groupIndex
- )
- writer.newline()
- writer.endtag("ratRanges")
- writer.newline()
- writer.begintag("groups")
- writer.newline()
- for groupIndex in range(self.numRecs):
- group = self.groups[groupIndex]
- recs = len(group)
- startsz = min(group.keys())
- endsz = max(group.keys())
- writer.begintag("group", index=groupIndex)
- writer.newline()
- writer.comment("recs=%d, startsz=%d, endsz=%d" %
- (recs, startsz, endsz))
- writer.newline()
- for yPelHeight, (yMax, yMin) in sorted(group.items()):
- writer.simpletag(
- "record",
- [('yPelHeight', yPelHeight), ('yMax', yMax), ('yMin', yMin)])
- writer.newline()
- writer.endtag("group")
- writer.newline()
- writer.endtag("groups")
- writer.newline()
+ raise ttLib.TTLibError(
+ "unknown format for VDMX table: version %s" % self.version
+ )
+ data = sstruct.pack(VDMX_HeaderFmt, self)
+ for ratio in self.ratRanges:
+ data += sstruct.pack(VDMX_RatRangeFmt, ratio)
+ # recalculate offsets to VDMX groups
+ for offset in self._getOffsets():
+ data += struct.pack(">H", offset)
+ for group in self.groups:
+ recs = len(group)
+ startsz = min(group.keys())
+ endsz = max(group.keys())
+ gHeader = {"recs": recs, "startsz": startsz, "endsz": endsz}
+ data += sstruct.pack(VDMX_GroupFmt, gHeader)
+ for yPelHeight, (yMax, yMin) in sorted(group.items()):
+ vTable = {"yPelHeight": yPelHeight, "yMax": yMax, "yMin": yMin}
+ data += sstruct.pack(VDMX_vTableFmt, vTable)
+ return data
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version":
- self.version = safeEval(attrs["value"])
- elif name == "ratRanges":
- if not hasattr(self, "ratRanges"):
- self.ratRanges = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "ratRange":
- if not hasattr(self, "numRatios"):
- self.numRatios = 1
- else:
- self.numRatios += 1
- ratio = {
- "bCharSet": safeEval(attrs["bCharSet"]),
- "xRatio": safeEval(attrs["xRatio"]),
- "yStartRatio": safeEval(attrs["yStartRatio"]),
- "yEndRatio": safeEval(attrs["yEndRatio"]),
- "groupIndex": safeEval(attrs["groupIndex"])
- }
- self.ratRanges.append(ratio)
- elif name == "groups":
- if not hasattr(self, "groups"):
- self.groups = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "group":
- if not hasattr(self, "numRecs"):
- self.numRecs = 1
- else:
- self.numRecs += 1
- group = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "record":
- yPelHeight = safeEval(attrs["yPelHeight"])
- yMax = safeEval(attrs["yMax"])
- yMin = safeEval(attrs["yMin"])
- group[yPelHeight] = (yMax, yMin)
- self.groups.append(group)
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.begintag("ratRanges")
+ writer.newline()
+ for ratio in self.ratRanges:
+ groupIndex = ratio["groupIndex"]
+ writer.simpletag(
+ "ratRange",
+ bCharSet=ratio["bCharSet"],
+ xRatio=ratio["xRatio"],
+ yStartRatio=ratio["yStartRatio"],
+ yEndRatio=ratio["yEndRatio"],
+ groupIndex=groupIndex,
+ )
+ writer.newline()
+ writer.endtag("ratRanges")
+ writer.newline()
+ writer.begintag("groups")
+ writer.newline()
+ for groupIndex in range(self.numRecs):
+ group = self.groups[groupIndex]
+ recs = len(group)
+ startsz = min(group.keys())
+ endsz = max(group.keys())
+ writer.begintag("group", index=groupIndex)
+ writer.newline()
+ writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz))
+ writer.newline()
+ for yPelHeight, (yMax, yMin) in sorted(group.items()):
+ writer.simpletag(
+ "record",
+ [("yPelHeight", yPelHeight), ("yMax", yMax), ("yMin", yMin)],
+ )
+ writer.newline()
+ writer.endtag("group")
+ writer.newline()
+ writer.endtag("groups")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ elif name == "ratRanges":
+ if not hasattr(self, "ratRanges"):
+ self.ratRanges = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "ratRange":
+ if not hasattr(self, "numRatios"):
+ self.numRatios = 1
+ else:
+ self.numRatios += 1
+ ratio = {
+ "bCharSet": safeEval(attrs["bCharSet"]),
+ "xRatio": safeEval(attrs["xRatio"]),
+ "yStartRatio": safeEval(attrs["yStartRatio"]),
+ "yEndRatio": safeEval(attrs["yEndRatio"]),
+ "groupIndex": safeEval(attrs["groupIndex"]),
+ }
+ self.ratRanges.append(ratio)
+ elif name == "groups":
+ if not hasattr(self, "groups"):
+ self.groups = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "group":
+ if not hasattr(self, "numRecs"):
+ self.numRecs = 1
+ else:
+ self.numRecs += 1
+ group = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "record":
+ yPelHeight = safeEval(attrs["yPelHeight"])
+ yMax = safeEval(attrs["yMax"])
+ yMin = safeEval(attrs["yMin"])
+ group[yPelHeight] = (yMax, yMin)
+ self.groups.append(group)
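A sketch of walking the decompiled structures above (assumes a TTFont loaded from a hypothetical hinted font that actually carries a VDMX table):

    from fontTools.ttLib import TTFont

    font = TTFont("SomeHintedFont.ttf")   # hypothetical path
    vdmx = font["VDMX"]
    for ratio in vdmx.ratRanges:
        group = vdmx.groups[ratio["groupIndex"]]
        # each group maps yPelHeight -> (yMax, yMin)
        for yPelHeight, (yMax, yMin) in sorted(group.items()):
            print(ratio["xRatio"], ratio["yStartRatio"], ratio["yEndRatio"],
                  yPelHeight, yMax, yMin)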
diff --git a/Lib/fontTools/ttLib/tables/V_O_R_G_.py b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
index e03e164b..4508c137 100644
--- a/Lib/fontTools/ttLib/tables/V_O_R_G_.py
+++ b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
@@ -5,135 +5,155 @@ import struct
class table_V_O_R_G_(DefaultTable.DefaultTable):
- """This table is structured so that you can treat it like a dictionary keyed by glyph name.
-
- ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.
-
- ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph.
- """
-
- def decompile(self, data, ttFont):
- self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
- self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics = struct.unpack(">HHhH", data[:8])
- assert (self.majorVersion <= 1), "Major version of VORG table is higher than I know how to handle"
- data = data[8:]
- vids = []
- gids = []
- pos = 0
- for i in range(self.numVertOriginYMetrics):
- gid, vOrigin = struct.unpack(">Hh", data[pos:pos+4])
- pos += 4
- gids.append(gid)
- vids.append(vOrigin)
-
- self.VOriginRecords = vOrig = {}
- glyphOrder = ttFont.getGlyphOrder()
- try:
- names = [glyphOrder[gid] for gid in gids]
- except IndexError:
- getGlyphName = self.getGlyphName
- names = map(getGlyphName, gids)
-
- for name, vid in zip(names, vids):
- vOrig[name] = vid
-
- def compile(self, ttFont):
- vorgs = list(self.VOriginRecords.values())
- names = list(self.VOriginRecords.keys())
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- gids = [nameMap[name] for name in names]
- vOriginTable = list(zip(gids, vorgs))
- self.numVertOriginYMetrics = len(vorgs)
- vOriginTable.sort() # must be in ascending GID order
- dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
- header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics)
- dataList.insert(0, header)
- data = bytesjoin(dataList)
- return data
-
- def toXML(self, writer, ttFont):
- writer.simpletag("majorVersion", value=self.majorVersion)
- writer.newline()
- writer.simpletag("minorVersion", value=self.minorVersion)
- writer.newline()
- writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
- writer.newline()
- writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
- writer.newline()
- vOriginTable = []
- glyphNames = self.VOriginRecords.keys()
- for glyphName in glyphNames:
- try:
- gid = ttFont.getGlyphID(glyphName)
- except:
- assert 0, "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
- vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
- vOriginTable.sort()
- for entry in vOriginTable:
- vOriginRec = VOriginRecord(entry[1], entry[2])
- vOriginRec.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "VOriginRecords"):
- self.VOriginRecords = {}
- self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
- if name == "VOriginRecord":
- vOriginRec = VOriginRecord()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- vOriginRec.fromXML(name, attrs, content, ttFont)
- self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
- elif "value" in attrs:
- setattr(self, name, safeEval(attrs["value"]))
-
- def __getitem__(self, glyphSelector):
- if isinstance(glyphSelector, int):
- # its a gid, convert to glyph name
- glyphSelector = self.getGlyphName(glyphSelector)
-
- if glyphSelector not in self.VOriginRecords:
- return self.defaultVertOriginY
-
- return self.VOriginRecords[glyphSelector]
-
- def __setitem__(self, glyphSelector, value):
- if isinstance(glyphSelector, int):
- # its a gid, convert to glyph name
- glyphSelector = self.getGlyphName(glyphSelector)
-
- if value != self.defaultVertOriginY:
- self.VOriginRecords[glyphSelector] = value
- elif glyphSelector in self.VOriginRecords:
- del self.VOriginRecords[glyphSelector]
-
- def __delitem__(self, glyphSelector):
- del self.VOriginRecords[glyphSelector]
+ """This table is structured so that you can treat it like a dictionary keyed by glyph name.
+
+ ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.
+
+ ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph.
+ """
+
+ def decompile(self, data, ttFont):
+ self.getGlyphName = (
+ ttFont.getGlyphName
+ ) # for use in get/set item functions, for access by GID
+ (
+ self.majorVersion,
+ self.minorVersion,
+ self.defaultVertOriginY,
+ self.numVertOriginYMetrics,
+ ) = struct.unpack(">HHhH", data[:8])
+ assert (
+ self.majorVersion <= 1
+ ), "Major version of VORG table is higher than I know how to handle"
+ data = data[8:]
+ vids = []
+ gids = []
+ pos = 0
+ for i in range(self.numVertOriginYMetrics):
+ gid, vOrigin = struct.unpack(">Hh", data[pos : pos + 4])
+ pos += 4
+ gids.append(gid)
+ vids.append(vOrigin)
+
+ self.VOriginRecords = vOrig = {}
+ glyphOrder = ttFont.getGlyphOrder()
+ try:
+ names = [glyphOrder[gid] for gid in gids]
+ except IndexError:
+ getGlyphName = self.getGlyphName
+ names = map(getGlyphName, gids)
+
+ for name, vid in zip(names, vids):
+ vOrig[name] = vid
+
+ def compile(self, ttFont):
+ vorgs = list(self.VOriginRecords.values())
+ names = list(self.VOriginRecords.keys())
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ gids = [nameMap[name] for name in names]
+ vOriginTable = list(zip(gids, vorgs))
+ self.numVertOriginYMetrics = len(vorgs)
+ vOriginTable.sort() # must be in ascending GID order
+ dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
+ header = struct.pack(
+ ">HHhH",
+ self.majorVersion,
+ self.minorVersion,
+ self.defaultVertOriginY,
+ self.numVertOriginYMetrics,
+ )
+ dataList.insert(0, header)
+ data = bytesjoin(dataList)
+ return data
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("majorVersion", value=self.majorVersion)
+ writer.newline()
+ writer.simpletag("minorVersion", value=self.minorVersion)
+ writer.newline()
+ writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
+ writer.newline()
+ writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
+ writer.newline()
+ vOriginTable = []
+ glyphNames = self.VOriginRecords.keys()
+ for glyphName in glyphNames:
+ try:
+ gid = ttFont.getGlyphID(glyphName)
+ except:
+ assert 0, (
+ "VORG table contains a glyph name not in ttFont.getGlyphNames(): "
+ + str(glyphName)
+ )
+ vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
+ vOriginTable.sort()
+ for entry in vOriginTable:
+ vOriginRec = VOriginRecord(entry[1], entry[2])
+ vOriginRec.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "VOriginRecords"):
+ self.VOriginRecords = {}
+ self.getGlyphName = (
+ ttFont.getGlyphName
+ ) # for use in get/set item functions, for access by GID
+ if name == "VOriginRecord":
+ vOriginRec = VOriginRecord()
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ vOriginRec.fromXML(name, attrs, content, ttFont)
+ self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
+ elif "value" in attrs:
+ setattr(self, name, safeEval(attrs["value"]))
+
+ def __getitem__(self, glyphSelector):
+ if isinstance(glyphSelector, int):
+ # it's a gid, convert to glyph name
+ glyphSelector = self.getGlyphName(glyphSelector)
+
+ if glyphSelector not in self.VOriginRecords:
+ return self.defaultVertOriginY
+
+ return self.VOriginRecords[glyphSelector]
+
+ def __setitem__(self, glyphSelector, value):
+ if isinstance(glyphSelector, int):
+ # it's a gid, convert to glyph name
+ glyphSelector = self.getGlyphName(glyphSelector)
+
+ if value != self.defaultVertOriginY:
+ self.VOriginRecords[glyphSelector] = value
+ elif glyphSelector in self.VOriginRecords:
+ del self.VOriginRecords[glyphSelector]
+
+ def __delitem__(self, glyphSelector):
+ del self.VOriginRecords[glyphSelector]
-class VOriginRecord(object):
- def __init__(self, name=None, vOrigin=None):
- self.glyphName = name
- self.vOrigin = vOrigin
-
- def toXML(self, writer, ttFont):
- writer.begintag("VOriginRecord")
- writer.newline()
- writer.simpletag("glyphName", value=self.glyphName)
- writer.newline()
- writer.simpletag("vOrigin", value=self.vOrigin)
- writer.newline()
- writer.endtag("VOriginRecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name == "glyphName":
- setattr(self, name, value)
- else:
- setattr(self, name, safeEval(value))
+class VOriginRecord(object):
+ def __init__(self, name=None, vOrigin=None):
+ self.glyphName = name
+ self.vOrigin = vOrigin
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("VOriginRecord")
+ writer.newline()
+ writer.simpletag("glyphName", value=self.glyphName)
+ writer.newline()
+ writer.simpletag("vOrigin", value=self.vOrigin)
+ writer.newline()
+ writer.endtag("VOriginRecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name == "glyphName":
+ setattr(self, name, value)
+ else:
+ setattr(self, name, safeEval(value))
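A usage sketch of the dictionary-like access described in the class docstring (the path is hypothetical; the font must be a CFF-flavoured OpenType font containing a VORG table):

    from fontTools.ttLib import TTFont

    font = TTFont("MyCFFFont.otf")   # hypothetical path
    vorg = font["VORG"]
    print(vorg["A"])    # per-glyph vertical origin, or defaultVertOriginY if unlisted
    vorg["A"] = 900     # kept as a record only if it differs from the default
    print(vorg[0])      # glyph IDs also work; they are converted to glyph names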
diff --git a/Lib/fontTools/ttLib/tables/V_V_A_R_.py b/Lib/fontTools/ttLib/tables/V_V_A_R_.py
index 88f30552..a3665fea 100644
--- a/Lib/fontTools/ttLib/tables/V_V_A_R_.py
+++ b/Lib/fontTools/ttLib/tables/V_V_A_R_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_V_V_A_R_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/__init__.py b/Lib/fontTools/ttLib/tables/__init__.py
index bbfb8b70..f4cba26b 100644
--- a/Lib/fontTools/ttLib/tables/__init__.py
+++ b/Lib/fontTools/ttLib/tables/__init__.py
@@ -1,95 +1,96 @@
-
# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
def _moduleFinderHint():
- """Dummy function to let modulefinder know what tables may be
- dynamically imported. Generated by MetaTools/buildTableList.py.
+ """Dummy function to let modulefinder know what tables may be
+ dynamically imported. Generated by MetaTools/buildTableList.py.
+
+ >>> _moduleFinderHint()
+ """
+ from . import B_A_S_E_
+ from . import C_B_D_T_
+ from . import C_B_L_C_
+ from . import C_F_F_
+ from . import C_F_F__2
+ from . import C_O_L_R_
+ from . import C_P_A_L_
+ from . import D_S_I_G_
+ from . import D__e_b_g
+ from . import E_B_D_T_
+ from . import E_B_L_C_
+ from . import F_F_T_M_
+ from . import F__e_a_t
+ from . import G_D_E_F_
+ from . import G_M_A_P_
+ from . import G_P_K_G_
+ from . import G_P_O_S_
+ from . import G_S_U_B_
+ from . import G__l_a_t
+ from . import G__l_o_c
+ from . import H_V_A_R_
+ from . import J_S_T_F_
+ from . import L_T_S_H_
+ from . import M_A_T_H_
+ from . import M_E_T_A_
+ from . import M_V_A_R_
+ from . import O_S_2f_2
+ from . import S_I_N_G_
+ from . import S_T_A_T_
+ from . import S_V_G_
+ from . import S__i_l_f
+ from . import S__i_l_l
+ from . import T_S_I_B_
+ from . import T_S_I_C_
+ from . import T_S_I_D_
+ from . import T_S_I_J_
+ from . import T_S_I_P_
+ from . import T_S_I_S_
+ from . import T_S_I_V_
+ from . import T_S_I__0
+ from . import T_S_I__1
+ from . import T_S_I__2
+ from . import T_S_I__3
+ from . import T_S_I__5
+ from . import T_T_F_A_
+ from . import V_D_M_X_
+ from . import V_O_R_G_
+ from . import V_V_A_R_
+ from . import _a_n_k_r
+ from . import _a_v_a_r
+ from . import _b_s_l_n
+ from . import _c_i_d_g
+ from . import _c_m_a_p
+ from . import _c_v_a_r
+ from . import _c_v_t
+ from . import _f_e_a_t
+ from . import _f_p_g_m
+ from . import _f_v_a_r
+ from . import _g_a_s_p
+ from . import _g_c_i_d
+ from . import _g_l_y_f
+ from . import _g_v_a_r
+ from . import _h_d_m_x
+ from . import _h_e_a_d
+ from . import _h_h_e_a
+ from . import _h_m_t_x
+ from . import _k_e_r_n
+ from . import _l_c_a_r
+ from . import _l_o_c_a
+ from . import _l_t_a_g
+ from . import _m_a_x_p
+ from . import _m_e_t_a
+ from . import _m_o_r_t
+ from . import _m_o_r_x
+ from . import _n_a_m_e
+ from . import _o_p_b_d
+ from . import _p_o_s_t
+ from . import _p_r_e_p
+ from . import _p_r_o_p
+ from . import _s_b_i_x
+ from . import _t_r_a_k
+ from . import _v_h_e_a
+ from . import _v_m_t_x
- >>> _moduleFinderHint()
- """
- from . import B_A_S_E_
- from . import C_B_D_T_
- from . import C_B_L_C_
- from . import C_F_F_
- from . import C_F_F__2
- from . import C_O_L_R_
- from . import C_P_A_L_
- from . import D_S_I_G_
- from . import D__e_b_g
- from . import E_B_D_T_
- from . import E_B_L_C_
- from . import F_F_T_M_
- from . import F__e_a_t
- from . import G_D_E_F_
- from . import G_M_A_P_
- from . import G_P_K_G_
- from . import G_P_O_S_
- from . import G_S_U_B_
- from . import G__l_a_t
- from . import G__l_o_c
- from . import H_V_A_R_
- from . import J_S_T_F_
- from . import L_T_S_H_
- from . import M_A_T_H_
- from . import M_E_T_A_
- from . import M_V_A_R_
- from . import O_S_2f_2
- from . import S_I_N_G_
- from . import S_T_A_T_
- from . import S_V_G_
- from . import S__i_l_f
- from . import S__i_l_l
- from . import T_S_I_B_
- from . import T_S_I_C_
- from . import T_S_I_D_
- from . import T_S_I_J_
- from . import T_S_I_P_
- from . import T_S_I_S_
- from . import T_S_I_V_
- from . import T_S_I__0
- from . import T_S_I__1
- from . import T_S_I__2
- from . import T_S_I__3
- from . import T_S_I__5
- from . import T_T_F_A_
- from . import V_D_M_X_
- from . import V_O_R_G_
- from . import V_V_A_R_
- from . import _a_n_k_r
- from . import _a_v_a_r
- from . import _b_s_l_n
- from . import _c_i_d_g
- from . import _c_m_a_p
- from . import _c_v_a_r
- from . import _c_v_t
- from . import _f_e_a_t
- from . import _f_p_g_m
- from . import _f_v_a_r
- from . import _g_a_s_p
- from . import _g_c_i_d
- from . import _g_l_y_f
- from . import _g_v_a_r
- from . import _h_d_m_x
- from . import _h_e_a_d
- from . import _h_h_e_a
- from . import _h_m_t_x
- from . import _k_e_r_n
- from . import _l_c_a_r
- from . import _l_o_c_a
- from . import _l_t_a_g
- from . import _m_a_x_p
- from . import _m_e_t_a
- from . import _m_o_r_t
- from . import _m_o_r_x
- from . import _n_a_m_e
- from . import _o_p_b_d
- from . import _p_o_s_t
- from . import _p_r_e_p
- from . import _p_r_o_p
- from . import _s_b_i_x
- from . import _t_r_a_k
- from . import _v_h_e_a
- from . import _v_m_t_x
if __name__ == "__main__":
- import doctest, sys
- sys.exit(doctest.testmod().failed)
+ import doctest, sys
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/tables/_a_n_k_r.py b/Lib/fontTools/ttLib/tables/_a_n_k_r.py
index 16f5c184..d1062ecc 100644
--- a/Lib/fontTools/ttLib/tables/_a_n_k_r.py
+++ b/Lib/fontTools/ttLib/tables/_a_n_k_r.py
@@ -1,5 +1,6 @@
from .otBase import BaseTTXConverter
+
class table__a_n_k_r(BaseTTXConverter):
"""
The anchor point table provides a way to define anchor points.
@@ -9,4 +10,5 @@ class table__a_n_k_r(BaseTTXConverter):
See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
"""
+
pass
diff --git a/Lib/fontTools/ttLib/tables/_a_v_a_r.py b/Lib/fontTools/ttLib/tables/_a_v_a_r.py
index 16f2a219..39039cf7 100644
--- a/Lib/fontTools/ttLib/tables/_a_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_a_v_a_r.py
@@ -5,29 +5,20 @@ from fontTools.misc.fixedTools import (
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
-from fontTools.misc.textTools import bytesjoin
+from fontTools.misc.textTools import bytesjoin, safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
+from . import otTables
import struct
import logging
log = logging.getLogger(__name__)
-# Apple's documentation of 'avar':
-# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html
+from .otBase import BaseTTXConverter
-AVAR_HEADER_FORMAT = """
- > # big endian
- majorVersion: H
- minorVersion: H
- reserved: H
- axisCount: H
-"""
-assert sstruct.calcsize(AVAR_HEADER_FORMAT) == 8, sstruct.calcsize(AVAR_HEADER_FORMAT)
-
-class table__a_v_a_r(DefaultTable.DefaultTable):
+class table__a_v_a_r(BaseTTXConverter):
"""Axis Variations Table
This class represents the ``avar`` table of a variable font. The object has one
@@ -54,46 +45,53 @@ class table__a_v_a_r(DefaultTable.DefaultTable):
dependencies = ["fvar"]
def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
+ super().__init__(tag)
self.segments = {}
def compile(self, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- header = {
- "majorVersion": 1,
- "minorVersion": 0,
- "reserved": 0,
- "axisCount": len(axisTags)
- }
- result = [sstruct.pack(AVAR_HEADER_FORMAT, header)]
+ if not hasattr(self, "table"):
+ self.table = otTables.avar()
+ if not hasattr(self.table, "Reserved"):
+ self.table.Reserved = 0
+ self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
+ self, "minorVersion", 0
+ )
+ self.table.AxisCount = len(axisTags)
+ self.table.AxisSegmentMap = []
for axis in axisTags:
- mappings = sorted(self.segments[axis].items())
- result.append(struct.pack(">H", len(mappings)))
- for key, value in mappings:
- fixedKey = fl2fi(key, 14)
- fixedValue = fl2fi(value, 14)
- result.append(struct.pack(">hh", fixedKey, fixedValue))
- return bytesjoin(result)
+ mappings = self.segments[axis]
+ segmentMap = otTables.AxisSegmentMap()
+ segmentMap.PositionMapCount = len(mappings)
+ segmentMap.AxisValueMap = []
+ for key, value in sorted(mappings.items()):
+ valueMap = otTables.AxisValueMap()
+ valueMap.FromCoordinate = key
+ valueMap.ToCoordinate = value
+ segmentMap.AxisValueMap.append(valueMap)
+ self.table.AxisSegmentMap.append(segmentMap)
+ return super().compile(ttFont)
def decompile(self, data, ttFont):
+ super().decompile(data, ttFont)
+ assert self.table.Version >= 0x00010000
+ self.majorVersion = self.table.Version >> 16
+ self.minorVersion = self.table.Version & 0xFFFF
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- header = {}
- headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT)
- header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize])
- majorVersion = header["majorVersion"]
- if majorVersion != 1:
- raise TTLibError("unsupported 'avar' version %d" % majorVersion)
- pos = headerSize
for axis in axisTags:
+ self.segments[axis] = {}
+ for axis, segmentMap in zip(axisTags, self.table.AxisSegmentMap):
segments = self.segments[axis] = {}
- numPairs = struct.unpack(">H", data[pos:pos+2])[0]
- pos = pos + 2
- for _ in range(numPairs):
- fromValue, toValue = struct.unpack(">hh", data[pos:pos+4])
- segments[fi2fl(fromValue, 14)] = fi2fl(toValue, 14)
- pos = pos + 4
+ for segment in segmentMap.AxisValueMap:
+ segments[segment.FromCoordinate] = segment.ToCoordinate
def toXML(self, writer, ttFont):
+ writer.simpletag(
+ "version",
+ major=getattr(self, "majorVersion", 1),
+ minor=getattr(self, "minorVersion", 0),
+ )
+ writer.newline()
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
for axis in axisTags:
writer.begintag("segment", axis=axis)
@@ -105,9 +103,24 @@ class table__a_v_a_r(DefaultTable.DefaultTable):
writer.newline()
writer.endtag("segment")
writer.newline()
+ if getattr(self, "majorVersion", 1) >= 2:
+ if self.table.VarIdxMap:
+ self.table.VarIdxMap.toXML(writer, ttFont, name="VarIdxMap")
+ if self.table.VarStore:
+ self.table.VarStore.toXML(writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
- if name == "segment":
+ if not hasattr(self, "table"):
+ self.table = otTables.avar()
+ if not hasattr(self.table, "Reserved"):
+ self.table.Reserved = 0
+ if name == "version":
+ self.majorVersion = safeEval(attrs["major"])
+ self.minorVersion = safeEval(attrs["minor"])
+ self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
+ self, "minorVersion", 0
+ )
+ elif name == "segment":
axis = attrs["axis"]
segment = self.segments[axis] = {}
for element in content:
@@ -117,6 +130,9 @@ class table__a_v_a_r(DefaultTable.DefaultTable):
fromValue = str2fl(elementAttrs["from"], 14)
toValue = str2fl(elementAttrs["to"], 14)
if fromValue in segment:
- log.warning("duplicate entry for %s in axis '%s'",
- fromValue, axis)
+ log.warning(
+ "duplicate entry for %s in axis '%s'", fromValue, axis
+ )
segment[fromValue] = toValue
+ else:
+ super().fromXML(name, attrs, content, ttFont)
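The public segments attribute keeps the same shape after this refactor: a per-axis dict mapping normalized "from" coordinates to normalized "to" coordinates. A sketch (hypothetical font path, assuming an existing fvar/avar):

    from fontTools.ttLib import TTFont

    font = TTFont("MyVariableFont.ttf")   # hypothetical variable font
    avar = font["avar"]
    avar.segments["wght"] = {
        -1.0: -1.0,
        0.0: 0.0,
        0.33: 0.5,    # entries between the -1/0/+1 anchors warp the axis
        1.0: 1.0,
    }
    font.save("MyVariableFont.remapped.ttf")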
diff --git a/Lib/fontTools/ttLib/tables/_c_i_d_g.py b/Lib/fontTools/ttLib/tables/_c_i_d_g.py
index 2517e785..f11901ba 100644
--- a/Lib/fontTools/ttLib/tables/_c_i_d_g.py
+++ b/Lib/fontTools/ttLib/tables/_c_i_d_g.py
@@ -4,16 +4,16 @@ from .otBase import BaseTTXConverter
class table__c_i_d_g(BaseTTXConverter):
"""The AAT ``cidg`` table has almost the same structure as ``gidc``,
-just mapping CIDs to GlyphIDs instead of the reverse direction.
+ just mapping CIDs to GlyphIDs instead of the reverse direction.
-It is useful for fonts that may be used by a PDF renderer in lieu of
-a font reference with a known glyph collection but no subsetted
-glyphs. For instance, a PDF can say “please use a font conforming
-to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
-say, a TrueType font. ``gidc`` is lossy for this purpose and is
-obsoleted by ``cidg``.
+ It is useful for fonts that may be used by a PDF renderer in lieu of
+ a font reference with a known glyph collection but no subsetted
+ glyphs. For instance, a PDF can say “please use a font conforming
+ to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
+ say, a TrueType font. ``gidc`` is lossy for this purpose and is
+ obsoleted by ``cidg``.
+
+ For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
+ (which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table."""
-For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
-(which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table.
-"""
pass
diff --git a/Lib/fontTools/ttLib/tables/_c_m_a_p.py b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
index ef2b5758..484c331c 100644
--- a/Lib/fontTools/ttLib/tables/_c_m_a_p.py
+++ b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
@@ -13,1379 +13,1564 @@ log = logging.getLogger(__name__)
def _make_map(font, chars, gids):
- assert len(chars) == len(gids)
- glyphNames = font.getGlyphNameMany(gids)
- cmap = {}
- for char,gid,name in zip(chars,gids,glyphNames):
- if gid == 0:
- continue
- cmap[char] = name
- return cmap
+ assert len(chars) == len(gids)
+ glyphNames = font.getGlyphNameMany(gids)
+ cmap = {}
+ for char, gid, name in zip(chars, gids, glyphNames):
+ if gid == 0:
+ continue
+ cmap[char] = name
+ return cmap
+
class table__c_m_a_p(DefaultTable.DefaultTable):
- """Character to Glyph Index Mapping Table
-
- This class represents the `cmap <https://docs.microsoft.com/en-us/typography/opentype/spec/cmap>`_
- table, which maps between input characters (in Unicode or other system encodings)
- and glyphs within the font. The ``cmap`` table contains one or more subtables
- which determine the mapping of of characters to glyphs across different platforms
- and encoding systems.
-
- ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access
- to the subtables, although it is normally easier to retrieve individual subtables
- through the utility methods described below. To add new subtables to a font,
- first determine the subtable format (if in doubt use format 4 for glyphs within
- the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation
- Sequences) construct subtable objects with ``CmapSubtable.newSubtable(format)``,
- and append them to the ``.tables`` list.
-
- Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap``
- attribute.
-
- Example::
-
- cmap4_0_3 = CmapSubtable.newSubtable(4)
- cmap4_0_3.platformID = 0
- cmap4_0_3.platEncID = 3
- cmap4_0_3.language = 0
- cmap4_0_3.cmap = { 0xC1: "Aacute" }
-
- cmap = newTable("cmap")
- cmap.tableVersion = 0
- cmap.tables = [cmap4_0_3]
- """
-
- def getcmap(self, platformID, platEncID):
- """Returns the first subtable which matches the given platform and encoding.
-
- Args:
- platformID (int): The platform ID. Use 0 for Unicode, 1 for Macintosh
- (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows.
- encodingID (int): Encoding ID. Interpretation depends on the platform ID.
- See the OpenType specification for details.
-
- Returns:
- An object which is a subclass of :py:class:`CmapSubtable` if a matching
- subtable is found within the font, or ``None`` otherwise.
- """
-
- for subtable in self.tables:
- if (subtable.platformID == platformID and
- subtable.platEncID == platEncID):
- return subtable
- return None # not found
-
- def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
- """Returns the 'best' Unicode cmap dictionary available in the font
- or ``None``, if no Unicode cmap subtable is available.
-
- By default it will search for the following (platformID, platEncID)
- pairs in order::
-
- (3, 10), # Windows Unicode full repertoire
- (0, 6), # Unicode full repertoire (format 13 subtable)
- (0, 4), # Unicode 2.0 full repertoire
- (3, 1), # Windows Unicode BMP
- (0, 3), # Unicode 2.0 BMP
- (0, 2), # Unicode ISO/IEC 10646
- (0, 1), # Unicode 1.1
- (0, 0) # Unicode 1.0
-
- This particular order matches what HarfBuzz uses to choose what
- subtable to use by default. This order prefers the largest-repertoire
- subtable, and among those, prefers the Windows-platform over the
- Unicode-platform as the former has wider support.
-
- This order can be customized via the ``cmapPreferences`` argument.
- """
- for platformID, platEncID in cmapPreferences:
- cmapSubtable = self.getcmap(platformID, platEncID)
- if cmapSubtable is not None:
- return cmapSubtable.cmap
- return None # None of the requested cmap subtables were found
-
- def buildReversed(self):
- """Builds a reverse mapping dictionary
-
- Iterates over all Unicode cmap tables and returns a dictionary mapping
- glyphs to sets of codepoints, such as::
-
- {
- 'one': {0x31}
- 'A': {0x41,0x391}
- }
-
- The values are sets of Unicode codepoints because
- some fonts map different codepoints to the same glyph.
- For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391
- GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph.
- """
- result = {}
- for subtable in self.tables:
- if subtable.isUnicode():
- for codepoint, name in subtable.cmap.items():
- result.setdefault(name, set()).add(codepoint)
- return result
-
- def decompile(self, data, ttFont):
- tableVersion, numSubTables = struct.unpack(">HH", data[:4])
- self.tableVersion = int(tableVersion)
- self.tables = tables = []
- seenOffsets = {}
- for i in range(numSubTables):
- platformID, platEncID, offset = struct.unpack(
- ">HHl", data[4+i*8:4+(i+1)*8])
- platformID, platEncID = int(platformID), int(platEncID)
- format, length = struct.unpack(">HH", data[offset:offset+4])
- if format in [8,10,12,13]:
- format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
- elif format in [14]:
- format, length = struct.unpack(">HL", data[offset:offset+6])
-
- if not length:
- log.error(
- "cmap subtable is reported as having zero length: platformID %s, "
- "platEncID %s, format %s offset %s. Skipping table.",
- platformID, platEncID, format, offset)
- continue
- table = CmapSubtable.newSubtable(format)
- table.platformID = platformID
- table.platEncID = platEncID
- # Note that by default we decompile only the subtable header info;
- # any other data gets decompiled only when an attribute of the
- # subtable is referenced.
- table.decompileHeader(data[offset:offset+int(length)], ttFont)
- if offset in seenOffsets:
- table.data = None # Mark as decompiled
- table.cmap = tables[seenOffsets[offset]].cmap
- else:
- seenOffsets[offset] = i
- tables.append(table)
- if ttFont.lazy is False: # Be lazy for None and True
- self.ensureDecompiled()
-
- def ensureDecompiled(self, recurse=False):
- # The recurse argument is unused, but part of the signature of
- # ensureDecompiled across the library.
- for st in self.tables:
- st.ensureDecompiled()
-
- def compile(self, ttFont):
- self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
- numSubTables = len(self.tables)
- totalOffset = 4 + 8 * numSubTables
- data = struct.pack(">HH", self.tableVersion, numSubTables)
- tableData = b""
- seen = {} # Some tables are the same object reference. Don't compile them twice.
- done = {} # Some tables are different objects, but compile to the same data chunk
- for table in self.tables:
- offset = seen.get(id(table.cmap))
- if offset is None:
- chunk = table.compile(ttFont)
- offset = done.get(chunk)
- if offset is None:
- offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
- tableData = tableData + chunk
- data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
- return data + tableData
-
- def toXML(self, writer, ttFont):
- writer.simpletag("tableVersion", version=self.tableVersion)
- writer.newline()
- for table in self.tables:
- table.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableVersion":
- self.tableVersion = safeEval(attrs["version"])
- return
- if name[:12] != "cmap_format_":
- return
- if not hasattr(self, "tables"):
- self.tables = []
- format = safeEval(name[12:])
- table = CmapSubtable.newSubtable(format)
- table.platformID = safeEval(attrs["platformID"])
- table.platEncID = safeEval(attrs["platEncID"])
- table.fromXML(name, attrs, content, ttFont)
- self.tables.append(table)
+ """Character to Glyph Index Mapping Table
+
+ This class represents the `cmap <https://docs.microsoft.com/en-us/typography/opentype/spec/cmap>`_
+ table, which maps between input characters (in Unicode or other system encodings)
+ and glyphs within the font. The ``cmap`` table contains one or more subtables
+ which determine the mapping of characters to glyphs across different platforms
+ and encoding systems.
+
+ ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access
+ to the subtables, although it is normally easier to retrieve individual subtables
+ through the utility methods described below. To add new subtables to a font,
+ first determine the subtable format (if in doubt use format 4 for glyphs within
+ the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation
+ Sequences), construct subtable objects with ``CmapSubtable.newSubtable(format)``,
+ and append them to the ``.tables`` list.
+
+ Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap``
+ attribute.
+
+ Example::
+
+ cmap4_0_3 = CmapSubtable.newSubtable(4)
+ cmap4_0_3.platformID = 0
+ cmap4_0_3.platEncID = 3
+ cmap4_0_3.language = 0
+ cmap4_0_3.cmap = { 0xC1: "Aacute" }
+
+ cmap = newTable("cmap")
+ cmap.tableVersion = 0
+ cmap.tables = [cmap4_0_3]
+ """
+
+ def getcmap(self, platformID, platEncID):
+ """Returns the first subtable which matches the given platform and encoding.
+
+ Args:
+ platformID (int): The platform ID. Use 0 for Unicode, 1 for Macintosh
+ (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows.
+ platEncID (int): Encoding ID. Interpretation depends on the platform ID.
+ See the OpenType specification for details.
+
+ Returns:
+ An object which is a subclass of :py:class:`CmapSubtable` if a matching
+ subtable is found within the font, or ``None`` otherwise.
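+
+ Example (a minimal usage sketch; ``font`` is assumed to be an already-loaded
+ ``TTFont``, and the requested subtable may be absent)::
+
+ # look up the Windows Unicode BMP subtable (3, 1), if present
+ subtable = font["cmap"].getcmap(3, 1)
+ if subtable is not None:
+ glyphName = subtable.cmap.get(0x0041)  # e.g. "A", or None if unmapped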
+ """
+
+ for subtable in self.tables:
+ if subtable.platformID == platformID and subtable.platEncID == platEncID:
+ return subtable
+ return None # not found
+
+ def getBestCmap(
+ self,
+ cmapPreferences=(
+ (3, 10),
+ (0, 6),
+ (0, 4),
+ (3, 1),
+ (0, 3),
+ (0, 2),
+ (0, 1),
+ (0, 0),
+ ),
+ ):
+ """Returns the 'best' Unicode cmap dictionary available in the font
+ or ``None``, if no Unicode cmap subtable is available.
+
+ By default it will search for the following (platformID, platEncID)
+ pairs in order::
+
+ (3, 10), # Windows Unicode full repertoire
+ (0, 6), # Unicode full repertoire (format 13 subtable)
+ (0, 4), # Unicode 2.0 full repertoire
+ (3, 1), # Windows Unicode BMP
+ (0, 3), # Unicode 2.0 BMP
+ (0, 2), # Unicode ISO/IEC 10646
+ (0, 1), # Unicode 1.1
+ (0, 0) # Unicode 1.0
+
+ This particular order matches what HarfBuzz uses to choose what
+ subtable to use by default. This order prefers the largest-repertoire
+ subtable, and among those, prefers the Windows-platform over the
+ Unicode-platform as the former has wider support.
+
+ This order can be customized via the ``cmapPreferences`` argument.
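+
+ Example (a minimal usage sketch; ``font`` is assumed to be an already-loaded
+ ``TTFont``)::
+
+ best = font["cmap"].getBestCmap()
+ if best is not None:
+ glyphName = best.get(0x1F600)  # None if the codepoint is unmapped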
+ """
+ for platformID, platEncID in cmapPreferences:
+ cmapSubtable = self.getcmap(platformID, platEncID)
+ if cmapSubtable is not None:
+ return cmapSubtable.cmap
+ return None # None of the requested cmap subtables were found
+
+ def buildReversed(self):
+ """Builds a reverse mapping dictionary
+
+ Iterates over all Unicode cmap tables and returns a dictionary mapping
+ glyphs to sets of codepoints, such as::
+
+ {
+ 'one': {0x31},
+ 'A': {0x41, 0x391}
+ }
+
+ The values are sets of Unicode codepoints because
+ some fonts map different codepoints to the same glyph.
+ For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391
+ GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph.
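+
+ Example (a minimal usage sketch; ``font`` is assumed to be an already-loaded
+ ``TTFont``)::
+
+ reversed_map = font["cmap"].buildReversed()
+ codepoints = reversed_map.get("A", set())  # e.g. {0x41} or {0x41, 0x391}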
+ """
+ result = {}
+ for subtable in self.tables:
+ if subtable.isUnicode():
+ for codepoint, name in subtable.cmap.items():
+ result.setdefault(name, set()).add(codepoint)
+ return result
+
+ def decompile(self, data, ttFont):
+ tableVersion, numSubTables = struct.unpack(">HH", data[:4])
+ self.tableVersion = int(tableVersion)
+ self.tables = tables = []
+ seenOffsets = {}
+ for i in range(numSubTables):
+ platformID, platEncID, offset = struct.unpack(
+ ">HHl", data[4 + i * 8 : 4 + (i + 1) * 8]
+ )
+ platformID, platEncID = int(platformID), int(platEncID)
+ format, length = struct.unpack(">HH", data[offset : offset + 4])
+ if format in [8, 10, 12, 13]:
+ format, reserved, length = struct.unpack(
+ ">HHL", data[offset : offset + 8]
+ )
+ elif format in [14]:
+ format, length = struct.unpack(">HL", data[offset : offset + 6])
+
+ if not length:
+ log.error(
+ "cmap subtable is reported as having zero length: platformID %s, "
+ "platEncID %s, format %s offset %s. Skipping table.",
+ platformID,
+ platEncID,
+ format,
+ offset,
+ )
+ continue
+ table = CmapSubtable.newSubtable(format)
+ table.platformID = platformID
+ table.platEncID = platEncID
+ # Note that by default we decompile only the subtable header info;
+ # any other data gets decompiled only when an attribute of the
+ # subtable is referenced.
+ table.decompileHeader(data[offset : offset + int(length)], ttFont)
+ if offset in seenOffsets:
+ table.data = None # Mark as decompiled
+ table.cmap = tables[seenOffsets[offset]].cmap
+ else:
+ seenOffsets[offset] = i
+ tables.append(table)
+ if ttFont.lazy is False: # Be lazy for None and True
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ for st in self.tables:
+ st.ensureDecompiled()
+
+ def compile(self, ttFont):
+ self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
+ numSubTables = len(self.tables)
+ totalOffset = 4 + 8 * numSubTables
+ data = struct.pack(">HH", self.tableVersion, numSubTables)
+ tableData = b""
+ seen = (
+ {}
+ ) # Some tables are the same object reference. Don't compile them twice.
+ done = (
+ {}
+ ) # Some tables are different objects, but compile to the same data chunk
+ for table in self.tables:
+ offset = seen.get(id(table.cmap))
+ if offset is None:
+ chunk = table.compile(ttFont)
+ offset = done.get(chunk)
+ if offset is None:
+ offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(
+ tableData
+ )
+ tableData = tableData + chunk
+ data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
+ return data + tableData
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("tableVersion", version=self.tableVersion)
+ writer.newline()
+ for table in self.tables:
+ table.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableVersion":
+ self.tableVersion = safeEval(attrs["version"])
+ return
+ if name[:12] != "cmap_format_":
+ return
+ if not hasattr(self, "tables"):
+ self.tables = []
+ format = safeEval(name[12:])
+ table = CmapSubtable.newSubtable(format)
+ table.platformID = safeEval(attrs["platformID"])
+ table.platEncID = safeEval(attrs["platEncID"])
+ table.fromXML(name, attrs, content, ttFont)
+ self.tables.append(table)
class CmapSubtable(object):
- """Base class for all cmap subtable formats.
-
- Subclasses which handle the individual subtable formats are named
- ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass`
- to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a
- new subtable object for a given format.
-
- The object exposes a ``.cmap`` attribute, which contains a dictionary mapping
- character codepoints to glyph names.
- """
-
- @staticmethod
- def getSubtableClass(format):
- """Return the subtable class for a format."""
- return cmap_classes.get(format, cmap_format_unknown)
-
- @staticmethod
- def newSubtable(format):
- """Return a new instance of a subtable for the given format
- ."""
- subtableClass = CmapSubtable.getSubtableClass(format)
- return subtableClass(format)
-
- def __init__(self, format):
- self.format = format
- self.data = None
- self.ttFont = None
- self.platformID = None #: The platform ID of this subtable
- self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``)
- self.language = None #: The language ID of this subtable (Macintosh platform only)
-
- def ensureDecompiled(self, recurse=False):
- # The recurse argument is unused, but part of the signature of
- # ensureDecompiled across the library.
- if self.data is None:
- return
- self.decompile(None, None) # use saved data.
- self.data = None # Once this table has been decompiled, make sure we don't
- # just return the original data. Also avoids recursion when
- # called with an attribute that the cmap subtable doesn't have.
-
- def __getattr__(self, attr):
- # allow lazy decompilation of subtables.
- if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
- raise AttributeError(attr)
- if self.data is None:
- raise AttributeError(attr)
- self.ensureDecompiled()
- return getattr(self, attr)
-
- def decompileHeader(self, data, ttFont):
- format, length, language = struct.unpack(">HHH", data[:6])
- assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
- self.format = int(format)
- self.length = int(length)
- self.language = int(language)
- self.data = data[6:]
- self.ttFont = ttFont
-
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ("language", self.language),
- ])
- writer.newline()
- codes = sorted(self.cmap.items())
- self._writeCodes(codes, writer)
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def getEncoding(self, default=None):
- """Returns the Python encoding name for this cmap subtable based on its platformID,
- platEncID, and language. If encoding for these values is not known, by default
- ``None`` is returned. That can be overridden by passing a value to the ``default``
- argument.
-
- Note that if you want to choose a "preferred" cmap subtable, most of the time
- ``self.isUnicode()`` is what you want as that one only returns true for the modern,
- commonly used, Unicode-compatible triplets, not the legacy ones.
- """
- return getEncoding(self.platformID, self.platEncID, self.language, default)
-
- def isUnicode(self):
- """Returns true if the characters are interpreted as Unicode codepoints."""
- return (self.platformID == 0 or
- (self.platformID == 3 and self.platEncID in [0, 1, 10]))
-
- def isSymbol(self):
- """Returns true if the subtable is for the Symbol encoding (3,0)"""
- return self.platformID == 3 and self.platEncID == 0
-
- def _writeCodes(self, codes, writer):
- isUnicode = self.isUnicode()
- for code, name in codes:
- writer.simpletag("map", code=hex(code), name=name)
- if isUnicode:
- writer.comment(Unicode[code])
- writer.newline()
-
- def __lt__(self, other):
- if not isinstance(other, CmapSubtable):
- return NotImplemented
-
- # implemented so that list.sort() sorts according to the spec.
- selfTuple = (
- getattr(self, "platformID", None),
- getattr(self, "platEncID", None),
- getattr(self, "language", None),
- self.__dict__)
- otherTuple = (
- getattr(other, "platformID", None),
- getattr(other, "platEncID", None),
- getattr(other, "language", None),
- other.__dict__)
- return selfTuple < otherTuple
+ """Base class for all cmap subtable formats.
+
+ Subclasses which handle the individual subtable formats are named
+ ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass`
+ to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a
+ new subtable object for a given format.
+
+ The object exposes a ``.cmap`` attribute, which contains a dictionary mapping
+ character codepoints to glyph names.
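+
+ Example (a minimal sketch of building a format 4 subtable by hand; the chosen
+ platform/encoding values are just one common combination)::
+
+ subtable = CmapSubtable.newSubtable(4)
+ subtable.platformID = 3
+ subtable.platEncID = 1
+ subtable.language = 0
+ subtable.cmap = {0x0041: "A"}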
+ """
+
+ @staticmethod
+ def getSubtableClass(format):
+ """Return the subtable class for a format."""
+ return cmap_classes.get(format, cmap_format_unknown)
+
+ @staticmethod
+ def newSubtable(format):
+ """Return a new instance of a subtable for the given format
+ ."""
+ subtableClass = CmapSubtable.getSubtableClass(format)
+ return subtableClass(format)
+
+ def __init__(self, format):
+ self.format = format
+ self.data = None
+ self.ttFont = None
+ self.platformID = None #: The platform ID of this subtable
+ self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``)
+ self.language = (
+ None #: The language ID of this subtable (Macintosh platform only)
+ )
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ if self.data is None:
+ return
+ self.decompile(None, None) # use saved data.
+ self.data = None # Once this table has been decompiled, make sure we don't
+ # just return the original data. Also avoids recursion when
+ # called with an attribute that the cmap subtable doesn't have.
+
+ def __getattr__(self, attr):
+ # allow lazy decompilation of subtables.
+ if attr[:2] == "__": # don't handle requests for member functions like '__lt__'
+ raise AttributeError(attr)
+ if self.data is None:
+ raise AttributeError(attr)
+ self.ensureDecompiled()
+ return getattr(self, attr)
+
+ def decompileHeader(self, data, ttFont):
+ format, length, language = struct.unpack(">HHH", data[:6])
+ assert (
+ len(data) == length
+ ), "corrupt cmap table format %d (data length: %d, header length: %d)" % (
+ format,
+ len(data),
+ length,
+ )
+ self.format = int(format)
+ self.length = int(length)
+ self.language = int(language)
+ self.data = data[6:]
+ self.ttFont = ttFont
+
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ("language", self.language),
+ ],
+ )
+ writer.newline()
+ codes = sorted(self.cmap.items())
+ self._writeCodes(codes, writer)
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def getEncoding(self, default=None):
+ """Returns the Python encoding name for this cmap subtable based on its platformID,
+ platEncID, and language. If encoding for these values is not known, by default
+ ``None`` is returned. That can be overridden by passing a value to the ``default``
+ argument.
+
+ Note that if you want to choose a "preferred" cmap subtable, most of the time
+ ``self.isUnicode()`` is what you want as that one only returns true for the modern,
+ commonly used, Unicode-compatible triplets, not the legacy ones.
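+
+ Example (a minimal usage sketch; ``subtable`` is assumed to be an existing
+ :py:class:`CmapSubtable` instance)::
+
+ enc = subtable.getEncoding(default="ascii")
+ # enc is now a Python codec name such as "mac_roman", or the fallback "ascii"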
+ """
+ return getEncoding(self.platformID, self.platEncID, self.language, default)
+
+ def isUnicode(self):
+ """Returns true if the characters are interpreted as Unicode codepoints."""
+ return self.platformID == 0 or (
+ self.platformID == 3 and self.platEncID in [0, 1, 10]
+ )
+
+ def isSymbol(self):
+ """Returns true if the subtable is for the Symbol encoding (3,0)"""
+ return self.platformID == 3 and self.platEncID == 0
+
+ def _writeCodes(self, codes, writer):
+ isUnicode = self.isUnicode()
+ for code, name in codes:
+ writer.simpletag("map", code=hex(code), name=name)
+ if isUnicode:
+ writer.comment(Unicode[code])
+ writer.newline()
+
+ def __lt__(self, other):
+ if not isinstance(other, CmapSubtable):
+ return NotImplemented
+
+ # implemented so that list.sort() sorts according to the spec.
+ selfTuple = (
+ getattr(self, "platformID", None),
+ getattr(self, "platEncID", None),
+ getattr(self, "language", None),
+ self.__dict__,
+ )
+ otherTuple = (
+ getattr(other, "platformID", None),
+ getattr(other, "platEncID", None),
+ getattr(other, "language", None),
+ other.__dict__,
+ )
+ return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
- data = self.data # decompileHeader assigns the data after the header to self.data
- assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
- gids = array.array("B")
- gids.frombytes(self.data)
- charCodes = list(range(len(gids)))
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", 0, 262, self.language) + self.data
-
- cmap = self.cmap
- assert set(cmap.keys()).issubset(range(256))
- getGlyphID = ttFont.getGlyphID
- valueList = [getGlyphID(cmap[i]) if i in cmap else 0 for i in range(256)]
-
- gids = array.array("B", valueList)
- data = struct.pack(">HHH", 0, 262, self.language) + gids.tobytes()
- assert len(data) == 262
- return data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
+ gids = array.array("B")
+ gids.frombytes(self.data)
+ charCodes = list(range(len(gids)))
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return struct.pack(">HHH", 0, 262, self.language) + self.data
+
+ cmap = self.cmap
+ assert set(cmap.keys()).issubset(range(256))
+ getGlyphID = ttFont.getGlyphID
+ valueList = [getGlyphID(cmap[i]) if i in cmap else 0 for i in range(256)]
+
+ gids = array.array("B", valueList)
+ data = struct.pack(">HHH", 0, 262, self.language) + gids.tobytes()
+ assert len(data) == 262
+ return data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
subHeaderFormat = ">HHhH"
+
+
class SubHeader(object):
- def __init__(self):
- self.firstCode = None
- self.entryCount = None
- self.idDelta = None
- self.idRangeOffset = None
- self.glyphIndexArray = []
+ def __init__(self):
+ self.firstCode = None
+ self.entryCount = None
+ self.idDelta = None
+ self.idRangeOffset = None
+ self.glyphIndexArray = []
-class cmap_format_2(CmapSubtable):
- def setIDDelta(self, subHeader):
- subHeader.idDelta = 0
- # find the minGI which is not zero.
- minGI = subHeader.glyphIndexArray[0]
- for gid in subHeader.glyphIndexArray:
- if (gid != 0) and (gid < minGI):
- minGI = gid
- # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
- # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
- # We would like to pick an idDelta such that the first glyphArray GID is 1,
- # so that we are more likely to be able to combine glypharray GID subranges.
- # This means that we have a problem when minGI is > 32K
- # Since the final gi is reconstructed from the glyphArray GID by:
- # (short)finalGID = (gid + idDelta) % 0x10000),
- # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
- # negative number to an unsigned short.
-
- if (minGI > 1):
- if minGI > 0x7FFF:
- subHeader.idDelta = -(0x10000 - minGI) -1
- else:
- subHeader.idDelta = minGI -1
- idDelta = subHeader.idDelta
- for i in range(subHeader.entryCount):
- gid = subHeader.glyphIndexArray[i]
- if gid > 0:
- subHeader.glyphIndexArray[i] = gid - idDelta
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- subHeaderKeys = []
- maxSubHeaderindex = 0
- # get the key array, and determine the number of subHeaders.
- allKeys = array.array("H")
- allKeys.frombytes(data[:512])
- data = data[512:]
- if sys.byteorder != "big": allKeys.byteswap()
- subHeaderKeys = [ key//8 for key in allKeys]
- maxSubHeaderindex = max(subHeaderKeys)
-
- #Load subHeaders
- subHeaderList = []
- pos = 0
- for i in range(maxSubHeaderindex + 1):
- subHeader = SubHeader()
- (subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
- subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
- pos += 8
- giDataPos = pos + subHeader.idRangeOffset-2
- giList = array.array("H")
- giList.frombytes(data[giDataPos:giDataPos + subHeader.entryCount*2])
- if sys.byteorder != "big": giList.byteswap()
- subHeader.glyphIndexArray = giList
- subHeaderList.append(subHeader)
- # How this gets processed.
- # Charcodes may be one or two bytes.
- # The first byte of a charcode is mapped through the subHeaderKeys, to select
- # a subHeader. For any subheader but 0, the next byte is then mapped through the
- # selected subheader. If subheader Index 0 is selected, then the byte itself is
- # mapped through the subheader, and there is no second byte.
- # Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
- #
- # Each subheader references a range in the glyphIndexArray whose length is entryCount.
- # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray
- # referenced by another subheader.
- # The only subheader that will be referenced by more than one first-byte value is the subheader
- # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
- # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
- # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex.
- # A subheader specifies a subrange within (0...256) by the
- # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
- # (e.g. glyph not in font).
- # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
- # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
- # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
- # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
- # Example for Logocut-Medium
- # first byte of charcode = 129; selects subheader 1.
- # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
- # second byte of charCode = 66
- # the index offset = 66-64 = 2.
- # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
- # [glyphIndexArray index], [subrange array index] = glyphIndex
- # [256], [0]=1 from charcode [129, 64]
- # [257], [1]=2 from charcode [129, 65]
- # [258], [2]=3 from charcode [129, 66]
- # [259], [3]=4 from charcode [129, 67]
- # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
- # add it to the glyphID to get the final glyphIndex
- # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
-
- self.data = b""
- cmap = {}
- notdefGI = 0
- for firstByte in range(256):
- subHeadindex = subHeaderKeys[firstByte]
- subHeader = subHeaderList[subHeadindex]
- if subHeadindex == 0:
- if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
- continue # gi is notdef.
- else:
- charCode = firstByte
- offsetIndex = firstByte - subHeader.firstCode
- gi = subHeader.glyphIndexArray[offsetIndex]
- if gi != 0:
- gi = (gi + subHeader.idDelta) % 0x10000
- else:
- continue # gi is notdef.
- cmap[charCode] = gi
- else:
- if subHeader.entryCount:
- charCodeOffset = firstByte * 256 + subHeader.firstCode
- for offsetIndex in range(subHeader.entryCount):
- charCode = charCodeOffset + offsetIndex
- gi = subHeader.glyphIndexArray[offsetIndex]
- if gi != 0:
- gi = (gi + subHeader.idDelta) % 0x10000
- else:
- continue
- cmap[charCode] = gi
- # If not subHeader.entryCount, then all char codes with this first byte are
- # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
- # same as mapping it to .notdef.
-
- gids = list(cmap.values())
- charCodes = list(cmap.keys())
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", self.format, self.length, self.language) + self.data
- kEmptyTwoCharCodeRange = -1
- notdefGI = 0
-
- items = sorted(self.cmap.items())
- charCodes = [item[0] for item in items]
- names = [item[1] for item in items]
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- # allow virtual GIDs in format 2 tables
- gids = []
- for name in names:
- try:
- gid = nameMap[name]
- except KeyError:
- try:
- if (name[:3] == 'gid'):
- gid = int(name[3:])
- else:
- gid = ttFont.getGlyphID(name)
- except:
- raise KeyError(name)
-
- gids.append(gid)
-
- # Process the (char code to gid) item list in char code order.
- # By definition, all one byte char codes map to subheader 0.
- # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0,
- # which defines all char codes in its range to map to notdef) unless proven otherwise.
- # Note that since the char code items are processed in char code order, all the char codes with the
- # same first byte are in sequential order.
-
- subHeaderKeys = [kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
- subHeaderList = []
-
- # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up
- # with a cmap where all the one byte char codes map to notdef,
- # with the result that the subhead 0 would not get created just by processing the item list.
- charCode = charCodes[0]
- if charCode > 255:
- subHeader = SubHeader()
- subHeader.firstCode = 0
- subHeader.entryCount = 0
- subHeader.idDelta = 0
- subHeader.idRangeOffset = 0
- subHeaderList.append(subHeader)
-
- lastFirstByte = -1
- items = zip(charCodes, gids)
- for charCode, gid in items:
- if gid == 0:
- continue
- firstbyte = charCode >> 8
- secondByte = charCode & 0x00FF
-
- if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
- if lastFirstByte > -1:
- # fix GI's and iDelta of current subheader.
- self.setIDDelta(subHeader)
-
- # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
- # for the indices matching the char codes.
- if lastFirstByte == 0:
- for index in range(subHeader.entryCount):
- charCode = subHeader.firstCode + index
- subHeaderKeys[charCode] = 0
-
- assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
- # init new subheader
- subHeader = SubHeader()
- subHeader.firstCode = secondByte
- subHeader.entryCount = 1
- subHeader.glyphIndexArray.append(gid)
- subHeaderList.append(subHeader)
- subHeaderKeys[firstbyte] = len(subHeaderList) -1
- lastFirstByte = firstbyte
- else:
- # need to fill in with notdefs all the code points between the last charCode and the current charCode.
- codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
- for i in range(codeDiff):
- subHeader.glyphIndexArray.append(notdefGI)
- subHeader.glyphIndexArray.append(gid)
- subHeader.entryCount = subHeader.entryCount + codeDiff + 1
-
- # fix GI's and iDelta of last subheader that we we added to the subheader array.
- self.setIDDelta(subHeader)
-
- # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
- subHeader = SubHeader()
- subHeader.firstCode = 0
- subHeader.entryCount = 0
- subHeader.idDelta = 0
- subHeader.idRangeOffset = 2
- subHeaderList.append(subHeader)
- emptySubheadIndex = len(subHeaderList) - 1
- for index in range(256):
- if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
- subHeaderKeys[index] = emptySubheadIndex
- # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
- # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
- # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
- # charcode 0 and GID 0.
-
- idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
- subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2.
- for index in range(subheadRangeLen):
- subHeader = subHeaderList[index]
- subHeader.idRangeOffset = 0
- for j in range(index):
- prevSubhead = subHeaderList[j]
- if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
- subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
- subHeader.glyphIndexArray = []
- break
- if subHeader.idRangeOffset == 0: # didn't find one.
- subHeader.idRangeOffset = idRangeOffset
- idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
- else:
- idRangeOffset = idRangeOffset - 8 # one less subheader
-
- # Now we can write out the data!
- length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
- for subhead in subHeaderList[:-1]:
- length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays.
- dataList = [struct.pack(">HHH", 2, length, self.language)]
- for index in subHeaderKeys:
- dataList.append(struct.pack(">H", index*8))
- for subhead in subHeaderList:
- dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
- for subhead in subHeaderList[:-1]:
- for gi in subhead.glyphIndexArray:
- dataList.append(struct.pack(">H", gi))
- data = bytesjoin(dataList)
- assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
- return data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+class cmap_format_2(CmapSubtable):
+ def setIDDelta(self, subHeader):
+ subHeader.idDelta = 0
+ # find the minGI which is not zero.
+ minGI = subHeader.glyphIndexArray[0]
+ for gid in subHeader.glyphIndexArray:
+ if (gid != 0) and (gid < minGI):
+ minGI = gid
+ # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
+ # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
+ # We would like to pick an idDelta such that the first glyphArray GID is 1,
+ # so that we are more likely to be able to combine glypharray GID subranges.
+ # This means that we have a problem when minGI is > 32K
+ # Since the final gi is reconstructed from the glyphArray GID by:
+ # (short)finalGID = (gid + idDelta) % 0x10000,
+ # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
+ # negative number to an unsigned short.
+
+ if minGI > 1:
+ if minGI > 0x7FFF:
+ subHeader.idDelta = -(0x10000 - minGI) - 1
+ else:
+ subHeader.idDelta = minGI - 1
+ idDelta = subHeader.idDelta
+ for i in range(subHeader.entryCount):
+ gid = subHeader.glyphIndexArray[i]
+ if gid > 0:
+ subHeader.glyphIndexArray[i] = gid - idDelta
+
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ subHeaderKeys = []
+ maxSubHeaderindex = 0
+ # get the key array, and determine the number of subHeaders.
+ allKeys = array.array("H")
+ allKeys.frombytes(data[:512])
+ data = data[512:]
+ if sys.byteorder != "big":
+ allKeys.byteswap()
+ subHeaderKeys = [key // 8 for key in allKeys]
+ maxSubHeaderindex = max(subHeaderKeys)
+
+ # Load subHeaders
+ subHeaderList = []
+ pos = 0
+ for i in range(maxSubHeaderindex + 1):
+ subHeader = SubHeader()
+ (
+ subHeader.firstCode,
+ subHeader.entryCount,
+ subHeader.idDelta,
+ subHeader.idRangeOffset,
+ ) = struct.unpack(subHeaderFormat, data[pos : pos + 8])
+ pos += 8
+ giDataPos = pos + subHeader.idRangeOffset - 2
+ giList = array.array("H")
+ giList.frombytes(data[giDataPos : giDataPos + subHeader.entryCount * 2])
+ if sys.byteorder != "big":
+ giList.byteswap()
+ subHeader.glyphIndexArray = giList
+ subHeaderList.append(subHeader)
+ # How this gets processed.
+ # Charcodes may be one or two bytes.
+ # The first byte of a charcode is mapped through the subHeaderKeys, to select
+ # a subHeader. For any subheader but 0, the next byte is then mapped through the
+ # selected subheader. If subheader Index 0 is selected, then the byte itself is
+ # mapped through the subheader, and there is no second byte.
+ # Then assume that the subsequent byte is the first byte of the next charcode, and repeat.
+ #
+ # Each subheader references a range in the glyphIndexArray whose length is entryCount.
+ # The range in glyphIndexArray referenced by a subheader may overlap with the range in glyphIndexArray
+ # referenced by another subheader.
+ # The only subheader that will be referenced by more than one first-byte value is the subheader
+ # that maps the entire range of glyphID values to glyphIndex 0, e.g. notdef:
+ # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
+ # A byte being mapped through a subheader is treated as an index into a mapping of array index to font glyphIndex.
+ # A subheader specifies a subrange within (0...256) by the
+ # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
+ # (e.g. glyph not in font).
+ # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
+ # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
+ # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
+ # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
+ # Example for Logocut-Medium
+ # first byte of charcode = 129; selects subheader 1.
+ # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
+ # second byte of charCode = 66
+ # the index offset = 66-64 = 2.
+ # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
+ # [glyphIndexArray index], [subrange array index] = glyphIndex
+ # [256], [0]=1 from charcode [129, 64]
+ # [257], [1]=2 from charcode [129, 65]
+ # [258], [2]=3 from charcode [129, 66]
+ # [259], [3]=4 from charcode [129, 67]
+ # So, the glyphIndex = 3 from the array. Then, if idDelta is not zero and the glyph index is not
+ # zero, add idDelta to the glyph index (mod 0x10000) to get the final glyphIndex value.
+ # In this case the final glyph index = 3 + 42 -> 45. Whew! (A commented sketch of this
+ # lookup appears just below.)
+
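+ # Illustrative sketch (comments only, hypothetical locals): repeating the Logocut-Medium
+ # lookup above for charcode bytes [129, 66], once subHeaderKeys/subHeaderList are built:
+ #   subHead = subHeaderList[subHeaderKeys[129]]   # selects subheader 1
+ #   offsetIndex = 66 - subHead.firstCode          # 66 - 64 = 2
+ #   gi = subHead.glyphIndexArray[offsetIndex]     # raw glyph index, here 3
+ #   finalGID = (gi + subHead.idDelta) % 0x10000   # 3 + 42 = 45
+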
+ self.data = b""
+ cmap = {}
+ notdefGI = 0
+ for firstByte in range(256):
+ subHeadindex = subHeaderKeys[firstByte]
+ subHeader = subHeaderList[subHeadindex]
+ if subHeadindex == 0:
+ if (firstByte < subHeader.firstCode) or (
+ firstByte >= subHeader.firstCode + subHeader.entryCount
+ ):
+ continue # gi is notdef.
+ else:
+ charCode = firstByte
+ offsetIndex = firstByte - subHeader.firstCode
+ gi = subHeader.glyphIndexArray[offsetIndex]
+ if gi != 0:
+ gi = (gi + subHeader.idDelta) % 0x10000
+ else:
+ continue # gi is notdef.
+ cmap[charCode] = gi
+ else:
+ if subHeader.entryCount:
+ charCodeOffset = firstByte * 256 + subHeader.firstCode
+ for offsetIndex in range(subHeader.entryCount):
+ charCode = charCodeOffset + offsetIndex
+ gi = subHeader.glyphIndexArray[offsetIndex]
+ if gi != 0:
+ gi = (gi + subHeader.idDelta) % 0x10000
+ else:
+ continue
+ cmap[charCode] = gi
+ # If not subHeader.entryCount, then all char codes with this first byte are
+ # mapped to .notdef. We can skip this subheader, and leave the glyphs un-encoded, which is the
+ # same as mapping them to .notdef.
+
+ gids = list(cmap.values())
+ charCodes = list(cmap.keys())
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
+ )
+ kEmptyTwoCharCodeRange = -1
+ notdefGI = 0
+
+ items = sorted(self.cmap.items())
+ charCodes = [item[0] for item in items]
+ names = [item[1] for item in items]
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ # allow virtual GIDs in format 2 tables
+ gids = []
+ for name in names:
+ try:
+ gid = nameMap[name]
+ except KeyError:
+ try:
+ if name[:3] == "gid":
+ gid = int(name[3:])
+ else:
+ gid = ttFont.getGlyphID(name)
+ except:
+ raise KeyError(name)
+
+ gids.append(gid)
+
+ # Process the (char code to gid) item list in char code order.
+ # By definition, all one byte char codes map to subheader 0.
+ # For all the two byte char codes, we assume that the first byte maps to the empty subhead (with an entry count of 0,
+ # which defines all char codes in its range to map to notdef) unless proven otherwise.
+ # Note that since the char code items are processed in char code order, all the char codes with the
+ # same first byte are in sequential order.
+
+ subHeaderKeys = [
+ kEmptyTwoCharCodeRange for x in range(256)
+ ] # list of indices into subHeaderList.
+ subHeaderList = []
+
+ # We force subheader entry 0 to exist in the subHeaderList in the case where someone comes up
+ # with a cmap where all the one byte char codes map to notdef,
+ # with the result that the subhead 0 would not get created just by processing the item list.
+ charCode = charCodes[0]
+ if charCode > 255:
+ subHeader = SubHeader()
+ subHeader.firstCode = 0
+ subHeader.entryCount = 0
+ subHeader.idDelta = 0
+ subHeader.idRangeOffset = 0
+ subHeaderList.append(subHeader)
+
+ lastFirstByte = -1
+ items = zip(charCodes, gids)
+ for charCode, gid in items:
+ if gid == 0:
+ continue
+ firstbyte = charCode >> 8
+ secondByte = charCode & 0x00FF
+
+ if (
+ firstbyte != lastFirstByte
+ ): # Need to update the current subhead, and start a new one.
+ if lastFirstByte > -1:
+ # fix GIs and idDelta of the current subheader.
+ self.setIDDelta(subHeader)
+
+ # If it was subheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
+ # for the indices matching the char codes.
+ if lastFirstByte == 0:
+ for index in range(subHeader.entryCount):
+ charCode = subHeader.firstCode + index
+ subHeaderKeys[charCode] = 0
+
+ assert subHeader.entryCount == len(
+ subHeader.glyphIndexArray
+ ), "Error - subhead entry count does not match len of glyphID subrange."
+ # init new subheader
+ subHeader = SubHeader()
+ subHeader.firstCode = secondByte
+ subHeader.entryCount = 1
+ subHeader.glyphIndexArray.append(gid)
+ subHeaderList.append(subHeader)
+ subHeaderKeys[firstbyte] = len(subHeaderList) - 1
+ lastFirstByte = firstbyte
+ else:
+ # need to fill in with notdefs all the code points between the last charCode and the current charCode.
+ codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
+ for i in range(codeDiff):
+ subHeader.glyphIndexArray.append(notdefGI)
+ subHeader.glyphIndexArray.append(gid)
+ subHeader.entryCount = subHeader.entryCount + codeDiff + 1
+
+ # fix GIs and idDelta of the last subheader that we added to the subheader array.
+ self.setIDDelta(subHeader)
+
+ # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
+ subHeader = SubHeader()
+ subHeader.firstCode = 0
+ subHeader.entryCount = 0
+ subHeader.idDelta = 0
+ subHeader.idRangeOffset = 2
+ subHeaderList.append(subHeader)
+ emptySubheadIndex = len(subHeaderList) - 1
+ for index in range(256):
+ if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
+ subHeaderKeys[index] = emptySubheadIndex
+ # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
+ # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
+ # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
+ # charcode 0 and GID 0.
+
+ idRangeOffset = (
+ len(subHeaderList) - 1
+ ) * 8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
+ subheadRangeLen = (
+ len(subHeaderList) - 1
+ ) # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2.
+ for index in range(subheadRangeLen):
+ subHeader = subHeaderList[index]
+ subHeader.idRangeOffset = 0
+ for j in range(index):
+ prevSubhead = subHeaderList[j]
+ if (
+ prevSubhead.glyphIndexArray == subHeader.glyphIndexArray
+ ): # use the glyphIndexArray subarray
+ subHeader.idRangeOffset = (
+ prevSubhead.idRangeOffset - (index - j) * 8
+ )
+ subHeader.glyphIndexArray = []
+ break
+ if subHeader.idRangeOffset == 0: # didn't find one.
+ subHeader.idRangeOffset = idRangeOffset
+ idRangeOffset = (
+ idRangeOffset - 8
+ ) + subHeader.entryCount * 2 # one less subheader, one more subArray.
+ else:
+ idRangeOffset = idRangeOffset - 8 # one less subheader
+
+ # Now we can write out the data!
+ length = (
+ 6 + 512 + 8 * len(subHeaderList)
+ ) # header, 256 subHeaderKeys, and subheader array.
+ for subhead in subHeaderList[:-1]:
+ length = (
+ length + len(subhead.glyphIndexArray) * 2
+ ) # We can't use subhead.entryCount, as some of the subhead may share subArrays.
+ dataList = [struct.pack(">HHH", 2, length, self.language)]
+ for index in subHeaderKeys:
+ dataList.append(struct.pack(">H", index * 8))
+ for subhead in subHeaderList:
+ dataList.append(
+ struct.pack(
+ subHeaderFormat,
+ subhead.firstCode,
+ subhead.entryCount,
+ subhead.idDelta,
+ subhead.idRangeOffset,
+ )
+ )
+ for subhead in subHeaderList[:-1]:
+ for gi in subhead.glyphIndexArray:
+ dataList.append(struct.pack(">H", gi))
+ data = bytesjoin(dataList)
+ assert len(data) == length, (
+ "Error: cmap format 2 is not same length as calculated! actual: "
+ + str(len(data))
+ + " calc : "
+ + str(length)
+ )
+ return data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
cmap_format_4_format = ">7H"
-#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
-#uint16 reservedPad # This value should be zero
-#uint16 startCode[segCount] # Starting character code for each segment
-#uint16 idDelta[segCount] # Delta for all character codes in segment
-#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
-#uint16 glyphIndexArray[variable] # Glyph index array
+# uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
+# uint16 reservedPad # This value should be zero
+# uint16 startCode[segCount] # Starting character code for each segment
+# uint16 idDelta[segCount] # Delta for all character codes in segment
+# uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
+# uint16 glyphIndexArray[variable] # Glyph index array
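+ #
+ # Rough lookup sketch (comments only): for a charcode c, find the first segment i with
+ # endCode[i] >= c; if startCode[i] <= c then glyphID = (c + idDelta[i]) & 0xFFFF when
+ # idRangeOffset[i] == 0, otherwise it is read from glyphIndexArray via the idRangeOffset
+ # "pointer" trick; if startCode[i] > c, the character maps to glyph 0 (.notdef).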
+
def splitRange(startCode, endCode, cmap):
- # Try to split a range of character codes into subranges with consecutive
- # glyph IDs in such a way that the cmap4 subtable can be stored "most"
- # efficiently. I can't prove I've got the optimal solution, but it seems
- # to do well with the fonts I tested: none became bigger, many became smaller.
- if startCode == endCode:
- return [], [endCode]
-
- lastID = cmap[startCode]
- lastCode = startCode
- inOrder = None
- orderedBegin = None
- subRanges = []
-
- # Gather subranges in which the glyph IDs are consecutive.
- for code in range(startCode + 1, endCode + 1):
- glyphID = cmap[code]
-
- if glyphID - 1 == lastID:
- if inOrder is None or not inOrder:
- inOrder = 1
- orderedBegin = lastCode
- else:
- if inOrder:
- inOrder = 0
- subRanges.append((orderedBegin, lastCode))
- orderedBegin = None
-
- lastID = glyphID
- lastCode = code
-
- if inOrder:
- subRanges.append((orderedBegin, lastCode))
- assert lastCode == endCode
-
- # Now filter out those new subranges that would only make the data bigger.
- # A new segment cost 8 bytes, not using a new segment costs 2 bytes per
- # character.
- newRanges = []
- for b, e in subRanges:
- if b == startCode and e == endCode:
- break # the whole range, we're fine
- if b == startCode or e == endCode:
- threshold = 4 # split costs one more segment
- else:
- threshold = 8 # split costs two more segments
- if (e - b + 1) > threshold:
- newRanges.append((b, e))
- subRanges = newRanges
-
- if not subRanges:
- return [], [endCode]
-
- if subRanges[0][0] != startCode:
- subRanges.insert(0, (startCode, subRanges[0][0] - 1))
- if subRanges[-1][1] != endCode:
- subRanges.append((subRanges[-1][1] + 1, endCode))
-
- # Fill the "holes" in the segments list -- those are the segments in which
- # the glyph IDs are _not_ consecutive.
- i = 1
- while i < len(subRanges):
- if subRanges[i-1][1] + 1 != subRanges[i][0]:
- subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1))
- i = i + 1
- i = i + 1
-
- # Transform the ranges into startCode/endCode lists.
- start = []
- end = []
- for b, e in subRanges:
- start.append(b)
- end.append(e)
- start.pop(0)
-
- assert len(start) + 1 == len(end)
- return start, end
+ # Try to split a range of character codes into subranges with consecutive
+ # glyph IDs in such a way that the cmap4 subtable can be stored "most"
+ # efficiently. I can't prove I've got the optimal solution, but it seems
+ # to do well with the fonts I tested: none became bigger, many became smaller.
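+ # For instance (an illustrative, hand-checked case): if cmap maps 0x41..0x5A to strictly
+ # consecutive glyph IDs, the whole run survives as a single segment and
+ # splitRange(0x41, 0x5A, cmap) returns ([], [0x5A]) -- no extra split points are introduced.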
+ if startCode == endCode:
+ return [], [endCode]
+
+ lastID = cmap[startCode]
+ lastCode = startCode
+ inOrder = None
+ orderedBegin = None
+ subRanges = []
+
+ # Gather subranges in which the glyph IDs are consecutive.
+ for code in range(startCode + 1, endCode + 1):
+ glyphID = cmap[code]
+
+ if glyphID - 1 == lastID:
+ if inOrder is None or not inOrder:
+ inOrder = 1
+ orderedBegin = lastCode
+ else:
+ if inOrder:
+ inOrder = 0
+ subRanges.append((orderedBegin, lastCode))
+ orderedBegin = None
+
+ lastID = glyphID
+ lastCode = code
+
+ if inOrder:
+ subRanges.append((orderedBegin, lastCode))
+ assert lastCode == endCode
+
+ # Now filter out those new subranges that would only make the data bigger.
+ # A new segment costs 8 bytes; not using a new segment costs 2 bytes per
+ # character.
+ newRanges = []
+ for b, e in subRanges:
+ if b == startCode and e == endCode:
+ break # the whole range, we're fine
+ if b == startCode or e == endCode:
+ threshold = 4 # split costs one more segment
+ else:
+ threshold = 8 # split costs two more segments
+ if (e - b + 1) > threshold:
+ newRanges.append((b, e))
+ subRanges = newRanges
+
+ if not subRanges:
+ return [], [endCode]
+
+ if subRanges[0][0] != startCode:
+ subRanges.insert(0, (startCode, subRanges[0][0] - 1))
+ if subRanges[-1][1] != endCode:
+ subRanges.append((subRanges[-1][1] + 1, endCode))
+
+ # Fill the "holes" in the segments list -- those are the segments in which
+ # the glyph IDs are _not_ consecutive.
+ i = 1
+ while i < len(subRanges):
+ if subRanges[i - 1][1] + 1 != subRanges[i][0]:
+ subRanges.insert(i, (subRanges[i - 1][1] + 1, subRanges[i][0] - 1))
+ i = i + 1
+ i = i + 1
+
+ # Transform the ranges into startCode/endCode lists.
+ start = []
+ end = []
+ for b, e in subRanges:
+ start.append(b)
+ end.append(e)
+ start.pop(0)
+
+ assert len(start) + 1 == len(end)
+ return start, end
class cmap_format_4(CmapSubtable):
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- (segCountX2, searchRange, entrySelector, rangeShift) = \
- struct.unpack(">4H", data[:8])
- data = data[8:]
- segCount = segCountX2 // 2
-
- allCodes = array.array("H")
- allCodes.frombytes(data)
- self.data = data = None
-
- if sys.byteorder != "big": allCodes.byteswap()
-
- # divide the data
- endCode = allCodes[:segCount]
- allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
- startCode = allCodes[:segCount]
- allCodes = allCodes[segCount:]
- idDelta = allCodes[:segCount]
- allCodes = allCodes[segCount:]
- idRangeOffset = allCodes[:segCount]
- glyphIndexArray = allCodes[segCount:]
- lenGIArray = len(glyphIndexArray)
-
- # build 2-byte character mapping
- charCodes = []
- gids = []
- for i in range(len(startCode) - 1): # don't do 0xffff!
- start = startCode[i]
- delta = idDelta[i]
- rangeOffset = idRangeOffset[i]
- partial = rangeOffset // 2 - start + i - len(idRangeOffset)
-
- rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
- charCodes.extend(rangeCharCodes)
- if rangeOffset == 0:
- gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
- else:
- for charCode in rangeCharCodes:
- index = charCode + partial
- assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
- if glyphIndexArray[index] != 0: # if not missing glyph
- glyphID = glyphIndexArray[index] + delta
- else:
- glyphID = 0 # missing glyph
- gids.append(glyphID & 0xFFFF)
-
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", self.format, self.length, self.language) + self.data
-
- charCodes = list(self.cmap.keys())
- if not charCodes:
- startCode = [0xffff]
- endCode = [0xffff]
- else:
- charCodes.sort()
- names = [self.cmap[code] for code in charCodes]
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- # allow virtual GIDs in format 4 tables
- gids = []
- for name in names:
- try:
- gid = nameMap[name]
- except KeyError:
- try:
- if (name[:3] == 'gid'):
- gid = int(name[3:])
- else:
- gid = ttFont.getGlyphID(name)
- except:
- raise KeyError(name)
-
- gids.append(gid)
- cmap = {} # code:glyphID mapping
- for code, gid in zip(charCodes, gids):
- cmap[code] = gid
-
- # Build startCode and endCode lists.
- # Split the char codes in ranges of consecutive char codes, then split
- # each range in more ranges of consecutive/not consecutive glyph IDs.
- # See splitRange().
- lastCode = charCodes[0]
- endCode = []
- startCode = [lastCode]
- for charCode in charCodes[1:]: # skip the first code, it's the first start code
- if charCode == lastCode + 1:
- lastCode = charCode
- continue
- start, end = splitRange(startCode[-1], lastCode, cmap)
- startCode.extend(start)
- endCode.extend(end)
- startCode.append(charCode)
- lastCode = charCode
- start, end = splitRange(startCode[-1], lastCode, cmap)
- startCode.extend(start)
- endCode.extend(end)
- startCode.append(0xffff)
- endCode.append(0xffff)
-
- # build up rest of cruft
- idDelta = []
- idRangeOffset = []
- glyphIndexArray = []
- for i in range(len(endCode)-1): # skip the closing codes (0xffff)
- indices = []
- for charCode in range(startCode[i], endCode[i] + 1):
- indices.append(cmap[charCode])
- if (indices == list(range(indices[0], indices[0] + len(indices)))):
- idDelta.append((indices[0] - startCode[i]) % 0x10000)
- idRangeOffset.append(0)
- else:
- idDelta.append(0)
- idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
- glyphIndexArray.extend(indices)
- idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
- idRangeOffset.append(0)
-
- # Insane.
- segCount = len(endCode)
- segCountX2 = segCount * 2
- searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
-
- charCodeArray = array.array("H", endCode + [0] + startCode)
- idDeltaArray = array.array("H", idDelta)
- restArray = array.array("H", idRangeOffset + glyphIndexArray)
- if sys.byteorder != "big": charCodeArray.byteswap()
- if sys.byteorder != "big": idDeltaArray.byteswap()
- if sys.byteorder != "big": restArray.byteswap()
- data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes()
-
- length = struct.calcsize(cmap_format_4_format) + len(data)
- header = struct.pack(cmap_format_4_format, self.format, length, self.language,
- segCountX2, searchRange, entrySelector, rangeShift)
- return header + data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- nameMap, attrsMap, dummyContent = element
- if nameMap != "map":
- assert 0, "Unrecognized keyword in cmap subtable"
- cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ (segCountX2, searchRange, entrySelector, rangeShift) = struct.unpack(
+ ">4H", data[:8]
+ )
+ data = data[8:]
+ segCount = segCountX2 // 2
+
+ allCodes = array.array("H")
+ allCodes.frombytes(data)
+ self.data = data = None
+
+ if sys.byteorder != "big":
+ allCodes.byteswap()
+
+ # divide the data
+ endCode = allCodes[:segCount]
+ allCodes = allCodes[segCount + 1 :] # the +1 is skipping the reservedPad field
+ startCode = allCodes[:segCount]
+ allCodes = allCodes[segCount:]
+ idDelta = allCodes[:segCount]
+ allCodes = allCodes[segCount:]
+ idRangeOffset = allCodes[:segCount]
+ glyphIndexArray = allCodes[segCount:]
+ lenGIArray = len(glyphIndexArray)
+
+ # build 2-byte character mapping
+ charCodes = []
+ gids = []
+ for i in range(len(startCode) - 1): # don't do 0xffff!
+ start = startCode[i]
+ delta = idDelta[i]
+ rangeOffset = idRangeOffset[i]
+ partial = rangeOffset // 2 - start + i - len(idRangeOffset)
+
+ rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
+ charCodes.extend(rangeCharCodes)
+ if rangeOffset == 0:
+ gids.extend(
+ [(charCode + delta) & 0xFFFF for charCode in rangeCharCodes]
+ )
+ else:
+ for charCode in rangeCharCodes:
+ index = charCode + partial
+ assert index < lenGIArray, (
+ "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !"
+ % (i, index, lenGIArray)
+ )
+ if glyphIndexArray[index] != 0: # if not missing glyph
+ glyphID = glyphIndexArray[index] + delta
+ else:
+ glyphID = 0 # missing glyph
+ gids.append(glyphID & 0xFFFF)
+
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
+ )
+
+ charCodes = list(self.cmap.keys())
+ if not charCodes:
+ startCode = [0xFFFF]
+ endCode = [0xFFFF]
+ else:
+ charCodes.sort()
+ names = [self.cmap[code] for code in charCodes]
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ # allow virtual GIDs in format 4 tables
+ gids = []
+ for name in names:
+ try:
+ gid = nameMap[name]
+ except KeyError:
+ try:
+ if name[:3] == "gid":
+ gid = int(name[3:])
+ else:
+ gid = ttFont.getGlyphID(name)
+ except:
+ raise KeyError(name)
+
+ gids.append(gid)
+ cmap = {} # code:glyphID mapping
+ for code, gid in zip(charCodes, gids):
+ cmap[code] = gid
+
+ # Build startCode and endCode lists.
+ # Split the char codes in ranges of consecutive char codes, then split
+ # each range in more ranges of consecutive/not consecutive glyph IDs.
+ # See splitRange().
+ lastCode = charCodes[0]
+ endCode = []
+ startCode = [lastCode]
+ for charCode in charCodes[
+ 1:
+ ]: # skip the first code, it's the first start code
+ if charCode == lastCode + 1:
+ lastCode = charCode
+ continue
+ start, end = splitRange(startCode[-1], lastCode, cmap)
+ startCode.extend(start)
+ endCode.extend(end)
+ startCode.append(charCode)
+ lastCode = charCode
+ start, end = splitRange(startCode[-1], lastCode, cmap)
+ startCode.extend(start)
+ endCode.extend(end)
+ startCode.append(0xFFFF)
+ endCode.append(0xFFFF)
+
+ # build up rest of cruft
+ idDelta = []
+ idRangeOffset = []
+ glyphIndexArray = []
+ for i in range(len(endCode) - 1): # skip the closing codes (0xffff)
+ indices = []
+ for charCode in range(startCode[i], endCode[i] + 1):
+ indices.append(cmap[charCode])
+ if indices == list(range(indices[0], indices[0] + len(indices))):
+ idDelta.append((indices[0] - startCode[i]) % 0x10000)
+ idRangeOffset.append(0)
+ else:
+ idDelta.append(0)
+ idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
+ glyphIndexArray.extend(indices)
+ idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
+ idRangeOffset.append(0)
+
+ # Insane.
+ segCount = len(endCode)
+ segCountX2 = segCount * 2
+ searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
+
+ charCodeArray = array.array("H", endCode + [0] + startCode)
+ idDeltaArray = array.array("H", idDelta)
+ restArray = array.array("H", idRangeOffset + glyphIndexArray)
+ if sys.byteorder != "big":
+ charCodeArray.byteswap()
+ if sys.byteorder != "big":
+ idDeltaArray.byteswap()
+ if sys.byteorder != "big":
+ restArray.byteswap()
+ data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes()
+
+ length = struct.calcsize(cmap_format_4_format) + len(data)
+ header = struct.pack(
+ cmap_format_4_format,
+ self.format,
+ length,
+ self.language,
+ segCountX2,
+ searchRange,
+ entrySelector,
+ rangeShift,
+ )
+ return header + data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ nameMap, attrsMap, dummyContent = element
+ if nameMap != "map":
+ assert 0, "Unrecognized keyword in cmap subtable"
+ cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- firstCode, entryCount = struct.unpack(">HH", data[:4])
- firstCode = int(firstCode)
- data = data[4:]
- #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
- gids = array.array("H")
- gids.frombytes(data[:2 * int(entryCount)])
- if sys.byteorder != "big": gids.byteswap()
- self.data = data = None
-
- charCodes = list(range(firstCode, firstCode + len(gids)))
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", self.format, self.length, self.language) + self.data
- cmap = self.cmap
- codes = sorted(cmap.keys())
- if codes: # yes, there are empty cmap tables.
- codes = list(range(codes[0], codes[-1] + 1))
- firstCode = codes[0]
- valueList = [
- ttFont.getGlyphID(cmap[code]) if code in cmap else 0
- for code in codes
- ]
- gids = array.array("H", valueList)
- if sys.byteorder != "big": gids.byteswap()
- data = gids.tobytes()
- else:
- data = b""
- firstCode = 0
- header = struct.pack(">HHHHH",
- 6, len(data) + 10, self.language, firstCode, len(codes))
- return header + data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ firstCode, entryCount = struct.unpack(">HH", data[:4])
+ firstCode = int(firstCode)
+ data = data[4:]
+ # assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
+ gids = array.array("H")
+ gids.frombytes(data[: 2 * int(entryCount)])
+ if sys.byteorder != "big":
+ gids.byteswap()
+ self.data = data = None
+
+ charCodes = list(range(firstCode, firstCode + len(gids)))
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
+ )
+ cmap = self.cmap
+ codes = sorted(cmap.keys())
+ if codes: # yes, there are empty cmap tables.
+ codes = list(range(codes[0], codes[-1] + 1))
+ firstCode = codes[0]
+ valueList = [
+ ttFont.getGlyphID(cmap[code]) if code in cmap else 0 for code in codes
+ ]
+ gids = array.array("H", valueList)
+ if sys.byteorder != "big":
+ gids.byteswap()
+ data = gids.tobytes()
+ else:
+ data = b""
+ firstCode = 0
+ header = struct.pack(
+ ">HHHHH", 6, len(data) + 10, self.language, firstCode, len(codes)
+ )
+ return header + data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12_or_13(CmapSubtable):
-
- def __init__(self, format):
- self.format = format
- self.reserved = 0
- self.data = None
- self.ttFont = None
-
- def decompileHeader(self, data, ttFont):
- format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
- assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length)
- self.format = format
- self.reserved = reserved
- self.length = length
- self.language = language
- self.nGroups = nGroups
- self.data = data[16:]
- self.ttFont = ttFont
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- charCodes = []
- gids = []
- pos = 0
- for i in range(self.nGroups):
- startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] )
- pos += 12
- lenGroup = 1 + endCharCode - startCharCode
- charCodes.extend(list(range(startCharCode, endCharCode +1)))
- gids.extend(self._computeGIDs(glyphID, lenGroup))
- self.data = data = None
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
- charCodes = list(self.cmap.keys())
- names = list(self.cmap.values())
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- # allow virtual GIDs in format 12 tables
- gids = []
- for name in names:
- try:
- gid = nameMap[name]
- except KeyError:
- try:
- if (name[:3] == 'gid'):
- gid = int(name[3:])
- else:
- gid = ttFont.getGlyphID(name)
- except:
- raise KeyError(name)
-
- gids.append(gid)
-
- cmap = {} # code:glyphID mapping
- for code, gid in zip(charCodes, gids):
- cmap[code] = gid
-
- charCodes.sort()
- index = 0
- startCharCode = charCodes[0]
- startGlyphID = cmap[startCharCode]
- lastGlyphID = startGlyphID - self._format_step
- lastCharCode = startCharCode - 1
- nGroups = 0
- dataList = []
- maxIndex = len(charCodes)
- for index in range(maxIndex):
- charCode = charCodes[index]
- glyphID = cmap[charCode]
- if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
- dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
- startCharCode = charCode
- startGlyphID = glyphID
- nGroups = nGroups + 1
- lastGlyphID = glyphID
- lastCharCode = charCode
- dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
- nGroups = nGroups + 1
- data = bytesjoin(dataList)
- lengthSubtable = len(data) +16
- assert len(data) == (nGroups*12) == (lengthSubtable-16)
- return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data
-
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ("format", self.format),
- ("reserved", self.reserved),
- ("length", self.length),
- ("language", self.language),
- ("nGroups", self.nGroups),
- ])
- writer.newline()
- codes = sorted(self.cmap.items())
- self._writeCodes(codes, writer)
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.format = safeEval(attrs["format"])
- self.reserved = safeEval(attrs["reserved"])
- self.length = safeEval(attrs["length"])
- self.language = safeEval(attrs["language"])
- self.nGroups = safeEval(attrs["nGroups"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+ def __init__(self, format):
+ self.format = format
+ self.reserved = 0
+ self.data = None
+ self.ttFont = None
+
+ def decompileHeader(self, data, ttFont):
+ format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
+ assert (
+ len(data) == (16 + nGroups * 12) == (length)
+ ), "corrupt cmap table format %d (data length: %d, header length: %d)" % (
+ self.format,
+ len(data),
+ length,
+ )
+ self.format = format
+ self.reserved = reserved
+ self.length = length
+ self.language = language
+ self.nGroups = nGroups
+ self.data = data[16:]
+ self.ttFont = ttFont
+
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ charCodes = []
+ gids = []
+ pos = 0
+ for i in range(self.nGroups):
+ startCharCode, endCharCode, glyphID = struct.unpack(
+ ">LLL", data[pos : pos + 12]
+ )
+ pos += 12
+ lenGroup = 1 + endCharCode - startCharCode
+ charCodes.extend(list(range(startCharCode, endCharCode + 1)))
+ gids.extend(self._computeGIDs(glyphID, lenGroup))
+ self.data = data = None
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(
+ ">HHLLL",
+ self.format,
+ self.reserved,
+ self.length,
+ self.language,
+ self.nGroups,
+ )
+ + self.data
+ )
+ charCodes = list(self.cmap.keys())
+ names = list(self.cmap.values())
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ # allow virtual GIDs in format 12 tables
+ gids = []
+ for name in names:
+ try:
+ gid = nameMap[name]
+ except KeyError:
+ try:
+ if name[:3] == "gid":
+ gid = int(name[3:])
+ else:
+ gid = ttFont.getGlyphID(name)
+ except:
+ raise KeyError(name)
+
+ gids.append(gid)
+
+ cmap = {} # code:glyphID mapping
+ for code, gid in zip(charCodes, gids):
+ cmap[code] = gid
+
+ charCodes.sort()
+ index = 0
+ startCharCode = charCodes[0]
+ startGlyphID = cmap[startCharCode]
+ lastGlyphID = startGlyphID - self._format_step
+ lastCharCode = startCharCode - 1
+ nGroups = 0
+ dataList = []
+ maxIndex = len(charCodes)
+ for index in range(maxIndex):
+ charCode = charCodes[index]
+ glyphID = cmap[charCode]
+ if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
+ dataList.append(
+ struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)
+ )
+ startCharCode = charCode
+ startGlyphID = glyphID
+ nGroups = nGroups + 1
+ lastGlyphID = glyphID
+ lastCharCode = charCode
+ dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
+ nGroups = nGroups + 1
+ data = bytesjoin(dataList)
+ lengthSubtable = len(data) + 16
+ assert len(data) == (nGroups * 12) == (lengthSubtable - 16)
+ return (
+ struct.pack(
+ ">HHLLL",
+ self.format,
+ self.reserved,
+ lengthSubtable,
+ self.language,
+ nGroups,
+ )
+ + data
+ )
+
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ("format", self.format),
+ ("reserved", self.reserved),
+ ("length", self.length),
+ ("language", self.language),
+ ("nGroups", self.nGroups),
+ ],
+ )
+ writer.newline()
+ codes = sorted(self.cmap.items())
+ self._writeCodes(codes, writer)
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.format = safeEval(attrs["format"])
+ self.reserved = safeEval(attrs["reserved"])
+ self.length = safeEval(attrs["length"])
+ self.language = safeEval(attrs["language"])
+ self.nGroups = safeEval(attrs["nGroups"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12(cmap_format_12_or_13):
+ _format_step = 1
- _format_step = 1
-
- def __init__(self, format=12):
- cmap_format_12_or_13.__init__(self, format)
+ def __init__(self, format=12):
+ cmap_format_12_or_13.__init__(self, format)
- def _computeGIDs(self, startingGlyph, numberOfGlyphs):
- return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
+ def _computeGIDs(self, startingGlyph, numberOfGlyphs):
+ return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
- def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
- return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
+ def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
+ return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
class cmap_format_13(cmap_format_12_or_13):
+ _format_step = 0
- _format_step = 0
+ def __init__(self, format=13):
+ cmap_format_12_or_13.__init__(self, format)
- def __init__(self, format=13):
- cmap_format_12_or_13.__init__(self, format)
+ def _computeGIDs(self, startingGlyph, numberOfGlyphs):
+ return [startingGlyph] * numberOfGlyphs
- def _computeGIDs(self, startingGlyph, numberOfGlyphs):
- return [startingGlyph] * numberOfGlyphs
-
- def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
- return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
+ def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
+ return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
def cvtToUVS(threeByteString):
- data = b"\0" + threeByteString
- val, = struct.unpack(">L", data)
- return val
+ data = b"\0" + threeByteString
+ (val,) = struct.unpack(">L", data)
+ return val
+
def cvtFromUVS(val):
- assert 0 <= val < 0x1000000
- fourByteString = struct.pack(">L", val)
- return fourByteString[1:]
+ assert 0 <= val < 0x1000000
+ fourByteString = struct.pack(">L", val)
+ return fourByteString[1:]
class cmap_format_14(CmapSubtable):
-
- def decompileHeader(self, data, ttFont):
- format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
- self.data = data[10:]
- self.length = length
- self.numVarSelectorRecords = numVarSelectorRecords
- self.ttFont = ttFont
- self.language = 0xFF # has no language.
-
- def decompile(self, data, ttFont):
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
- data = self.data
-
- self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
- uvsDict = {}
- recOffset = 0
- for n in range(self.numVarSelectorRecords):
- uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
- recOffset += 11
- varUVS = cvtToUVS(uvs)
- if defOVSOffset:
- startOffset = defOVSOffset - 10
- numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
- startOffset +=4
- for r in range(numValues):
- uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
- startOffset += 4
- firstBaseUV = cvtToUVS(uv)
- cnt = addtlCnt+1
- baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
- glyphList = [None]*cnt
- localUVList = zip(baseUVList, glyphList)
- try:
- uvsDict[varUVS].extend(localUVList)
- except KeyError:
- uvsDict[varUVS] = list(localUVList)
-
- if nonDefUVSOffset:
- startOffset = nonDefUVSOffset - 10
- numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
- startOffset +=4
- localUVList = []
- for r in range(numRecs):
- uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
- startOffset += 5
- uv = cvtToUVS(uv)
- glyphName = self.ttFont.getGlyphName(gid)
- localUVList.append((uv, glyphName))
- try:
- uvsDict[varUVS].extend(localUVList)
- except KeyError:
- uvsDict[varUVS] = localUVList
-
- self.uvsDict = uvsDict
-
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ])
- writer.newline()
- uvsDict = self.uvsDict
- uvsList = sorted(uvsDict.keys())
- for uvs in uvsList:
- uvList = uvsDict[uvs]
- uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
- for uv, gname in uvList:
- attrs = [("uv", hex(uv)), ("uvs", hex(uvs))]
- if gname is not None:
- attrs.append(("name", gname))
- writer.simpletag("map", attrs)
- writer.newline()
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
- if not hasattr(self, "cmap"):
- self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
- if not hasattr(self, "uvsDict"):
- self.uvsDict = {}
- uvsDict = self.uvsDict
-
- # For backwards compatibility reasons we accept "None" as an indicator
- # for "default mapping", unless the font actually has a glyph named
- # "None".
- _hasGlyphNamedNone = None
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- uvs = safeEval(attrs["uvs"])
- uv = safeEval(attrs["uv"])
- gname = attrs.get("name")
- if gname == "None":
- if _hasGlyphNamedNone is None:
- _hasGlyphNamedNone = "None" in ttFont.getGlyphOrder()
- if not _hasGlyphNamedNone:
- gname = None
- try:
- uvsDict[uvs].append((uv, gname))
- except KeyError:
- uvsDict[uvs] = [(uv, gname)]
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data
-
- uvsDict = self.uvsDict
- uvsList = sorted(uvsDict.keys())
- self.numVarSelectorRecords = len(uvsList)
- offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
- data = []
- varSelectorRecords =[]
- for uvs in uvsList:
- entryList = uvsDict[uvs]
-
- defList = [entry for entry in entryList if entry[1] is None]
- if defList:
- defList = [entry[0] for entry in defList]
- defOVSOffset = offset
- defList.sort()
-
- lastUV = defList[0]
- cnt = -1
- defRecs = []
- for defEntry in defList:
- cnt +=1
- if (lastUV+cnt) != defEntry:
- rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
- lastUV = defEntry
- defRecs.append(rec)
- cnt = 0
-
- rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
- defRecs.append(rec)
-
- numDefRecs = len(defRecs)
- data.append(struct.pack(">L", numDefRecs))
- data.extend(defRecs)
- offset += 4 + numDefRecs*4
- else:
- defOVSOffset = 0
-
- ndefList = [entry for entry in entryList if entry[1] is not None]
- if ndefList:
- nonDefUVSOffset = offset
- ndefList.sort()
- numNonDefRecs = len(ndefList)
- data.append(struct.pack(">L", numNonDefRecs))
- offset += 4 + numNonDefRecs*5
-
- for uv, gname in ndefList:
- gid = ttFont.getGlyphID(gname)
- ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
- data.append(ndrec)
- else:
- nonDefUVSOffset = 0
-
- vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
- varSelectorRecords.append(vrec)
-
- data = bytesjoin(varSelectorRecords) + bytesjoin(data)
- self.length = 10 + len(data)
- headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords)
-
- return headerdata + data
+ def decompileHeader(self, data, ttFont):
+ format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
+ self.data = data[10:]
+ self.length = length
+ self.numVarSelectorRecords = numVarSelectorRecords
+ self.ttFont = ttFont
+ self.language = 0xFF # has no language.
+
+ def decompile(self, data, ttFont):
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+ data = self.data
+
+ self.cmap = (
+ {}
+ ) # so that clients that expect this to exist in a cmap table won't fail.
+ uvsDict = {}
+ recOffset = 0
+ for n in range(self.numVarSelectorRecords):
+ uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(
+ ">3sLL", data[recOffset : recOffset + 11]
+ )
+ recOffset += 11
+ varUVS = cvtToUVS(uvs)
+ if defOVSOffset:
+ startOffset = defOVSOffset - 10
+ (numValues,) = struct.unpack(">L", data[startOffset : startOffset + 4])
+ startOffset += 4
+ for r in range(numValues):
+ uv, addtlCnt = struct.unpack(
+ ">3sB", data[startOffset : startOffset + 4]
+ )
+ startOffset += 4
+ firstBaseUV = cvtToUVS(uv)
+ cnt = addtlCnt + 1
+ baseUVList = list(range(firstBaseUV, firstBaseUV + cnt))
+ glyphList = [None] * cnt
+ localUVList = zip(baseUVList, glyphList)
+ try:
+ uvsDict[varUVS].extend(localUVList)
+ except KeyError:
+ uvsDict[varUVS] = list(localUVList)
+
+ if nonDefUVSOffset:
+ startOffset = nonDefUVSOffset - 10
+ (numRecs,) = struct.unpack(">L", data[startOffset : startOffset + 4])
+ startOffset += 4
+ localUVList = []
+ for r in range(numRecs):
+ uv, gid = struct.unpack(">3sH", data[startOffset : startOffset + 5])
+ startOffset += 5
+ uv = cvtToUVS(uv)
+ glyphName = self.ttFont.getGlyphName(gid)
+ localUVList.append((uv, glyphName))
+ try:
+ uvsDict[varUVS].extend(localUVList)
+ except KeyError:
+ uvsDict[varUVS] = localUVList
+
+ self.uvsDict = uvsDict
+
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ],
+ )
+ writer.newline()
+ uvsDict = self.uvsDict
+ uvsList = sorted(uvsDict.keys())
+ for uvs in uvsList:
+ uvList = uvsDict[uvs]
+ uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
+ for uv, gname in uvList:
+ attrs = [("uv", hex(uv)), ("uvs", hex(uvs))]
+ if gname is not None:
+ attrs.append(("name", gname))
+ writer.simpletag("map", attrs)
+ writer.newline()
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
+ if not hasattr(self, "cmap"):
+ self.cmap = (
+ {}
+ ) # so that clients that expect this to exist in a cmap table won't fail.
+ if not hasattr(self, "uvsDict"):
+ self.uvsDict = {}
+ uvsDict = self.uvsDict
+
+ # For backwards compatibility reasons we accept "None" as an indicator
+ # for "default mapping", unless the font actually has a glyph named
+ # "None".
+ _hasGlyphNamedNone = None
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ uvs = safeEval(attrs["uvs"])
+ uv = safeEval(attrs["uv"])
+ gname = attrs.get("name")
+ if gname == "None":
+ if _hasGlyphNamedNone is None:
+ _hasGlyphNamedNone = "None" in ttFont.getGlyphOrder()
+ if not _hasGlyphNamedNone:
+ gname = None
+ try:
+ uvsDict[uvs].append((uv, gname))
+ except KeyError:
+ uvsDict[uvs] = [(uv, gname)]
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(
+ ">HLL", self.format, self.length, self.numVarSelectorRecords
+ )
+ + self.data
+ )
+
+ uvsDict = self.uvsDict
+ uvsList = sorted(uvsDict.keys())
+ self.numVarSelectorRecords = len(uvsList)
+ offset = (
+ 10 + self.numVarSelectorRecords * 11
+ ) # current value is end of VarSelectorRecords block.
+ data = []
+ varSelectorRecords = []
+ for uvs in uvsList:
+ entryList = uvsDict[uvs]
+
+ defList = [entry for entry in entryList if entry[1] is None]
+ if defList:
+ defList = [entry[0] for entry in defList]
+ defOVSOffset = offset
+ defList.sort()
+
+ lastUV = defList[0]
+ cnt = -1
+ defRecs = []
+ for defEntry in defList:
+ cnt += 1
+ if (lastUV + cnt) != defEntry:
+ rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt - 1)
+ lastUV = defEntry
+ defRecs.append(rec)
+ cnt = 0
+
+ rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
+ defRecs.append(rec)
+
+ numDefRecs = len(defRecs)
+ data.append(struct.pack(">L", numDefRecs))
+ data.extend(defRecs)
+ offset += 4 + numDefRecs * 4
+ else:
+ defOVSOffset = 0
+
+ ndefList = [entry for entry in entryList if entry[1] is not None]
+ if ndefList:
+ nonDefUVSOffset = offset
+ ndefList.sort()
+ numNonDefRecs = len(ndefList)
+ data.append(struct.pack(">L", numNonDefRecs))
+ offset += 4 + numNonDefRecs * 5
+
+ for uv, gname in ndefList:
+ gid = ttFont.getGlyphID(gname)
+ ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
+ data.append(ndrec)
+ else:
+ nonDefUVSOffset = 0
+
+ vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
+ varSelectorRecords.append(vrec)
+
+ data = bytesjoin(varSelectorRecords) + bytesjoin(data)
+ self.length = 10 + len(data)
+ headerdata = struct.pack(
+ ">HLL", self.format, self.length, self.numVarSelectorRecords
+ )
+
+ return headerdata + data
class cmap_format_unknown(CmapSubtable):
+ def toXML(self, writer, ttFont):
+ cmapName = self.__class__.__name__[:12] + str(self.format)
+ writer.begintag(
+ cmapName,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ],
+ )
+ writer.newline()
+ writer.dumphex(self.data)
+ writer.endtag(cmapName)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.data = readHex(content)
+ self.cmap = {}
+
+ def decompileHeader(self, data, ttFont):
+ self.language = 0 # dummy value
+ self.data = data
+
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ def compile(self, ttFont):
+ if self.data:
+ return self.data
+ else:
+ return None
- def toXML(self, writer, ttFont):
- cmapName = self.__class__.__name__[:12] + str(self.format)
- writer.begintag(cmapName, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ])
- writer.newline()
- writer.dumphex(self.data)
- writer.endtag(cmapName)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.data = readHex(content)
- self.cmap = {}
-
- def decompileHeader(self, data, ttFont):
- self.language = 0 # dummy value
- self.data = data
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- def compile(self, ttFont):
- if self.data:
- return self.data
- else:
- return None
cmap_classes = {
- 0: cmap_format_0,
- 2: cmap_format_2,
- 4: cmap_format_4,
- 6: cmap_format_6,
- 12: cmap_format_12,
- 13: cmap_format_13,
- 14: cmap_format_14,
+ 0: cmap_format_0,
+ 2: cmap_format_2,
+ 4: cmap_format_4,
+ 6: cmap_format_6,
+ 12: cmap_format_12,
+ 13: cmap_format_13,
+ 14: cmap_format_14,
}
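
For orientation, the per-segment choice made in cmap_format_4.compile() above (a single idDelta when a segment's glyph IDs form one consecutive run, otherwise idDelta 0 plus an idRangeOffset into glyphIndexArray) can be sketched in isolation. This is a standalone illustration, not fontTools API; the character code and glyph IDs are made-up sample values.

def segmentStrategy(startCode, gids):
    # gids are the glyph IDs mapped by startCode .. startCode + len(gids) - 1
    if gids == list(range(gids[0], gids[0] + len(gids))):
        # Consecutive run: one idDelta covers the whole segment.
        return ("idDelta", (gids[0] - startCode) % 0x10000)
    # Scattered IDs: idDelta stays 0 and the IDs are written out to
    # glyphIndexArray, reached through a non-zero idRangeOffset.
    return ("glyphIndexArray", list(gids))

print(segmentStrategy(0x41, [36, 37, 38]))  # ('idDelta', 65507)
print(segmentStrategy(0x41, [36, 70, 38]))  # ('glyphIndexArray', [36, 70, 38])
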
diff --git a/Lib/fontTools/ttLib/tables/_c_v_a_r.py b/Lib/fontTools/ttLib/tables/_c_v_a_r.py
index a67efe02..6ea44dba 100644
--- a/Lib/fontTools/ttLib/tables/_c_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_c_v_a_r.py
@@ -1,8 +1,11 @@
from . import DefaultTable
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytesjoin
-from fontTools.ttLib.tables.TupleVariation import \
- compileTupleVariationStore, decompileTupleVariationStore, TupleVariation
+from fontTools.ttLib.tables.TupleVariation import (
+ compileTupleVariationStore,
+ decompileTupleVariationStore,
+ TupleVariation,
+)
# https://www.microsoft.com/typography/otspec/cvar.htm
@@ -34,18 +37,15 @@ class table__c_v_a_r(DefaultTable.DefaultTable):
pointCount=len(ttFont["cvt "].values),
axisTags=[axis.axisTag for axis in ttFont["fvar"].axes],
sharedTupleIndices={},
- useSharedPoints=useSharedPoints)
+ useSharedPoints=useSharedPoints,
+ )
header = {
"majorVersion": self.majorVersion,
"minorVersion": self.minorVersion,
"tupleVariationCount": tupleVariationCount,
"offsetToData": CVAR_HEADER_SIZE + len(tuples),
}
- return b''.join([
- sstruct.pack(CVAR_HEADER_FORMAT, header),
- tuples,
- data
- ])
+ return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data])
def decompile(self, data, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
@@ -55,10 +55,15 @@ class table__c_v_a_r(DefaultTable.DefaultTable):
self.minorVersion = header["minorVersion"]
assert self.majorVersion == 1, self.majorVersion
self.variations = decompileTupleVariationStore(
- tableTag=self.tableTag, axisTags=axisTags,
+ tableTag=self.tableTag,
+ axisTags=axisTags,
tupleVariationCount=header["tupleVariationCount"],
- pointCount=len(ttFont["cvt "].values), sharedTuples=None,
- data=data, pos=CVAR_HEADER_SIZE, dataPos=header["offsetToData"])
+ pointCount=len(ttFont["cvt "].values),
+ sharedTuples=None,
+ data=data,
+ pos=CVAR_HEADER_SIZE,
+ dataPos=header["offsetToData"],
+ )
def fromXML(self, name, attrs, content, ttFont):
if name == "version":
@@ -75,8 +80,7 @@ class table__c_v_a_r(DefaultTable.DefaultTable):
def toXML(self, writer, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- writer.simpletag("version",
- major=self.majorVersion, minor=self.minorVersion)
+ writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion)
writer.newline()
for var in self.variations:
var.toXML(writer, axisTags)
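
As a rough usage sketch of the store being compiled here: each cvar variation pairs a region (per-axis start/peak/end tuples) with one delta per 'cvt ' entry, where None means "leave that value unchanged". This assumes the TupleVariation(axes, coordinates) constructor imported above; the axis tag and delta values are purely illustrative.

from fontTools.ttLib.tables.TupleVariation import TupleVariation

deltas = [0, 12, None, -8]           # one entry per CVT value; None = no change
region = {"wght": (0.0, 1.0, 1.0)}   # only active toward the heavy end of wght
var = TupleVariation(region, deltas)
print(var.axes, var.coordinates)
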
diff --git a/Lib/fontTools/ttLib/tables/_c_v_t.py b/Lib/fontTools/ttLib/tables/_c_v_t.py
index 26395c93..7f946775 100644
--- a/Lib/fontTools/ttLib/tables/_c_v_t.py
+++ b/Lib/fontTools/ttLib/tables/_c_v_t.py
@@ -3,43 +3,45 @@ from . import DefaultTable
import sys
import array
-class table__c_v_t(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- values = array.array("h")
- values.frombytes(data)
- if sys.byteorder != "big": values.byteswap()
- self.values = values
-
- def compile(self, ttFont):
- values = self.values[:]
- if sys.byteorder != "big": values.byteswap()
- return values.tobytes()
-
- def toXML(self, writer, ttFont):
- for i in range(len(self.values)):
- value = self.values[i]
- writer.simpletag("cv", value=value, index=i)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "values"):
- self.values = array.array("h")
- if name == "cv":
- index = safeEval(attrs["index"])
- value = safeEval(attrs["value"])
- for i in range(1 + index - len(self.values)):
- self.values.append(0)
- self.values[index] = value
-
- def __len__(self):
- return len(self.values)
-
- def __getitem__(self, index):
- return self.values[index]
-
- def __setitem__(self, index, value):
- self.values[index] = value
-
- def __delitem__(self, index):
- del self.values[index]
+class table__c_v_t(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ values = array.array("h")
+ values.frombytes(data)
+ if sys.byteorder != "big":
+ values.byteswap()
+ self.values = values
+
+ def compile(self, ttFont):
+ values = self.values[:]
+ if sys.byteorder != "big":
+ values.byteswap()
+ return values.tobytes()
+
+ def toXML(self, writer, ttFont):
+ for i in range(len(self.values)):
+ value = self.values[i]
+ writer.simpletag("cv", value=value, index=i)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "values"):
+ self.values = array.array("h")
+ if name == "cv":
+ index = safeEval(attrs["index"])
+ value = safeEval(attrs["value"])
+ for i in range(1 + index - len(self.values)):
+ self.values.append(0)
+ self.values[index] = value
+
+ def __len__(self):
+ return len(self.values)
+
+ def __getitem__(self, index):
+ return self.values[index]
+
+ def __setitem__(self, index, value):
+ self.values[index] = value
+
+ def __delitem__(self, index):
+ del self.values[index]
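
The reformatted class keeps its list-like interface, so control values can be read and patched in place. A hedged usage sketch follows; "MyFont.ttf" is a placeholder for any TrueType font that carries a 'cvt ' table.

from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")
cvt = font["cvt "]               # note the trailing space in the table tag
print(len(cvt), cvt[0])          # __len__ / __getitem__ from the class above
cvt[0] = 25                      # __setitem__ writes straight into self.values
font.save("MyFont-patched.ttf")
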
diff --git a/Lib/fontTools/ttLib/tables/_f_e_a_t.py b/Lib/fontTools/ttLib/tables/_f_e_a_t.py
index 079b514c..c9a48eff 100644
--- a/Lib/fontTools/ttLib/tables/_f_e_a_t.py
+++ b/Lib/fontTools/ttLib/tables/_f_e_a_t.py
@@ -2,10 +2,11 @@ from .otBase import BaseTTXConverter
class table__f_e_a_t(BaseTTXConverter):
- """The feature name table is an AAT (Apple Advanced Typography) table for
- storing font features, settings, and their human-readable names. It should
- not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS``
- tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_
- in the TrueType Reference Manual for more information on the structure and
- purpose of this table."""
- pass
+ """The feature name table is an AAT (Apple Advanced Typography) table for
+ storing font features, settings, and their human-readable names. It should
+ not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS``
+ tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_
+ in the TrueType Reference Manual for more information on the structure and
+ purpose of this table."""
+
+ pass
diff --git a/Lib/fontTools/ttLib/tables/_f_p_g_m.py b/Lib/fontTools/ttLib/tables/_f_p_g_m.py
index ec3576ce..df23041d 100644
--- a/Lib/fontTools/ttLib/tables/_f_p_g_m.py
+++ b/Lib/fontTools/ttLib/tables/_f_p_g_m.py
@@ -1,48 +1,49 @@
from . import DefaultTable
from . import ttProgram
-class table__f_p_g_m(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- program = ttProgram.Program()
- program.fromBytecode(data)
- self.program = program
-
- def compile(self, ttFont):
- return self.program.getBytecode()
-
- def toXML(self, writer, ttFont):
- self.program.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- program = ttProgram.Program()
- program.fromXML(name, attrs, content, ttFont)
- self.program = program
-
- def __bool__(self):
- """
- >>> fpgm = table__f_p_g_m()
- >>> bool(fpgm)
- False
- >>> p = ttProgram.Program()
- >>> fpgm.program = p
- >>> bool(fpgm)
- False
- >>> bc = bytearray([0])
- >>> p.fromBytecode(bc)
- >>> bool(fpgm)
- True
- >>> p.bytecode.pop()
- 0
- >>> bool(fpgm)
- False
- """
- return hasattr(self, 'program') and bool(self.program)
-
- __nonzero__ = __bool__
+class table__f_p_g_m(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ program = ttProgram.Program()
+ program.fromBytecode(data)
+ self.program = program
+
+ def compile(self, ttFont):
+ return self.program.getBytecode()
+
+ def toXML(self, writer, ttFont):
+ self.program.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ program = ttProgram.Program()
+ program.fromXML(name, attrs, content, ttFont)
+ self.program = program
+
+ def __bool__(self):
+ """
+ >>> fpgm = table__f_p_g_m()
+ >>> bool(fpgm)
+ False
+ >>> p = ttProgram.Program()
+ >>> fpgm.program = p
+ >>> bool(fpgm)
+ False
+ >>> bc = bytearray([0])
+ >>> p.fromBytecode(bc)
+ >>> bool(fpgm)
+ True
+ >>> p.bytecode.pop()
+ 0
+ >>> bool(fpgm)
+ False
+ """
+ return hasattr(self, "program") and bool(self.program)
+
+ __nonzero__ = __bool__
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
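
Since this table is a thin wrapper around ttProgram.Program (compile() simply returns program.getBytecode()), the font program can be inspected directly. A hedged sketch, assuming a hinted font; "Hinted.ttf" is a placeholder path.

from fontTools.ttLib import TTFont

font = TTFont("Hinted.ttf")
if "fpgm" in font and font["fpgm"]:              # __bool__ defined above
    bytecode = font["fpgm"].program.getBytecode()
    print(len(bytecode), "bytes of font program")
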
diff --git a/Lib/fontTools/ttLib/tables/_f_v_a_r.py b/Lib/fontTools/ttLib/tables/_f_v_a_r.py
index d7409195..062a9aa4 100644
--- a/Lib/fontTools/ttLib/tables/_f_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_f_v_a_r.py
@@ -41,6 +41,7 @@ FVAR_INSTANCE_FORMAT = """
flags: H
"""
+
class table__f_v_a_r(DefaultTable.DefaultTable):
dependencies = ["name"]
@@ -51,8 +52,9 @@ class table__f_v_a_r(DefaultTable.DefaultTable):
def compile(self, ttFont):
instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4)
- includePostScriptNames = any(instance.postscriptNameID != 0xFFFF
- for instance in self.instances)
+ includePostScriptNames = any(
+ instance.postscriptNameID != 0xFFFF for instance in self.instances
+ )
if includePostScriptNames:
instanceSize += 2
header = {
@@ -81,14 +83,14 @@ class table__f_v_a_r(DefaultTable.DefaultTable):
axisSize = header["axisSize"]
for _ in range(header["axisCount"]):
axis = Axis()
- axis.decompile(data[pos:pos+axisSize])
+ axis.decompile(data[pos : pos + axisSize])
self.axes.append(axis)
pos += axisSize
instanceSize = header["instanceSize"]
axisTags = [axis.axisTag for axis in self.axes]
for _ in range(header["instanceCount"]):
instance = NamedInstance()
- instance.decompile(data[pos:pos+instanceSize], axisTags)
+ instance.decompile(data[pos : pos + instanceSize], axisTags)
self.instances.append(instance)
pos += instanceSize
@@ -108,6 +110,7 @@ class table__f_v_a_r(DefaultTable.DefaultTable):
instance.fromXML(name, attrs, content, ttFont)
self.instances.append(instance)
+
class Axis(object):
def __init__(self):
self.axisTag = None
@@ -124,19 +127,23 @@ class Axis(object):
sstruct.unpack2(FVAR_AXIS_FORMAT, data, self)
def toXML(self, writer, ttFont):
- name = ttFont["name"].getDebugName(self.axisNameID)
+ name = (
+ ttFont["name"].getDebugName(self.axisNameID) if "name" in ttFont else None
+ )
if name is not None:
writer.newline()
writer.comment(name)
writer.newline()
writer.begintag("Axis")
writer.newline()
- for tag, value in [("AxisTag", self.axisTag),
- ("Flags", "0x%X" % self.flags),
- ("MinValue", fl2str(self.minValue, 16)),
- ("DefaultValue", fl2str(self.defaultValue, 16)),
- ("MaxValue", fl2str(self.maxValue, 16)),
- ("AxisNameID", str(self.axisNameID))]:
+ for tag, value in [
+ ("AxisTag", self.axisTag),
+ ("Flags", "0x%X" % self.flags),
+ ("MinValue", fl2str(self.minValue, 16)),
+ ("DefaultValue", fl2str(self.defaultValue, 16)),
+ ("MaxValue", fl2str(self.maxValue, 16)),
+ ("AxisNameID", str(self.axisNameID)),
+ ]:
writer.begintag(tag)
writer.write(value)
writer.endtag(tag)
@@ -145,17 +152,16 @@ class Axis(object):
writer.newline()
def fromXML(self, name, _attrs, content, ttFont):
- assert(name == "Axis")
+ assert name == "Axis"
for tag, _, value in filter(lambda t: type(t) is tuple, content):
- value = ''.join(value)
+ value = "".join(value)
if tag == "AxisTag":
self.axisTag = Tag(value)
- elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue",
- "AxisNameID"}:
+ elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", "AxisNameID"}:
setattr(
self,
tag[0].lower() + tag[1:],
- str2fl(value, 16) if tag.endswith("Value") else safeEval(value)
+ str2fl(value, 16) if tag.endswith("Value") else safeEval(value),
)
@@ -183,37 +189,54 @@ class NamedInstance(object):
self.coordinates[axis] = fi2fl(value, 16)
pos += 4
if pos + 2 <= len(data):
- self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
+ self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
else:
- self.postscriptNameID = 0xFFFF
+ self.postscriptNameID = 0xFFFF
def toXML(self, writer, ttFont):
- name = ttFont["name"].getDebugName(self.subfamilyNameID)
+ name = (
+ ttFont["name"].getDebugName(self.subfamilyNameID)
+ if "name" in ttFont
+ else None
+ )
if name is not None:
writer.newline()
writer.comment(name)
writer.newline()
- psname = ttFont["name"].getDebugName(self.postscriptNameID)
+ psname = (
+ ttFont["name"].getDebugName(self.postscriptNameID)
+ if "name" in ttFont
+ else None
+ )
if psname is not None:
- writer.comment(u"PostScript: " + psname)
+ writer.comment("PostScript: " + psname)
writer.newline()
- if self.postscriptNameID == 0xFFFF:
- writer.begintag("NamedInstance", flags=("0x%X" % self.flags),
- subfamilyNameID=self.subfamilyNameID)
+ if self.postscriptNameID == 0xFFFF:
+ writer.begintag(
+ "NamedInstance",
+ flags=("0x%X" % self.flags),
+ subfamilyNameID=self.subfamilyNameID,
+ )
else:
- writer.begintag("NamedInstance", flags=("0x%X" % self.flags),
- subfamilyNameID=self.subfamilyNameID,
- postscriptNameID=self.postscriptNameID, )
+ writer.begintag(
+ "NamedInstance",
+ flags=("0x%X" % self.flags),
+ subfamilyNameID=self.subfamilyNameID,
+ postscriptNameID=self.postscriptNameID,
+ )
writer.newline()
for axis in ttFont["fvar"].axes:
- writer.simpletag("coord", axis=axis.axisTag,
- value=fl2str(self.coordinates[axis.axisTag], 16))
+ writer.simpletag(
+ "coord",
+ axis=axis.axisTag,
+ value=fl2str(self.coordinates[axis.axisTag], 16),
+ )
writer.newline()
writer.endtag("NamedInstance")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- assert(name == "NamedInstance")
+ assert name == "NamedInstance"
self.subfamilyNameID = safeEval(attrs["subfamilyNameID"])
self.flags = safeEval(attrs.get("flags", "0"))
if "postscriptNameID" in attrs:
diff --git a/Lib/fontTools/ttLib/tables/_g_a_s_p.py b/Lib/fontTools/ttLib/tables/_g_a_s_p.py
index 2c80913c..10c32a87 100644
--- a/Lib/fontTools/ttLib/tables/_g_a_s_p.py
+++ b/Lib/fontTools/ttLib/tables/_g_a_s_p.py
@@ -8,42 +8,48 @@ GASP_SYMMETRIC_SMOOTHING = 0x0008
GASP_DOGRAY = 0x0002
GASP_GRIDFIT = 0x0001
-class table__g_a_s_p(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- self.version, numRanges = struct.unpack(">HH", data[:4])
- assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
- data = data[4:]
- self.gaspRange = {}
- for i in range(numRanges):
- rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
- self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
- data = data[4:]
- assert not data, "too much data"
+class table__g_a_s_p(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ self.version, numRanges = struct.unpack(">HH", data[:4])
+ assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
+ data = data[4:]
+ self.gaspRange = {}
+ for i in range(numRanges):
+ rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
+ self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
+ data = data[4:]
+ assert not data, "too much data"
- def compile(self, ttFont):
- version = 0 # ignore self.version
- numRanges = len(self.gaspRange)
- data = b""
- items = sorted(self.gaspRange.items())
- for rangeMaxPPEM, rangeGaspBehavior in items:
- data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
- if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
- version = 1
- data = struct.pack(">HH", version, numRanges) + data
- return data
+ def compile(self, ttFont):
+ version = 0 # ignore self.version
+ numRanges = len(self.gaspRange)
+ data = b""
+ items = sorted(self.gaspRange.items())
+ for rangeMaxPPEM, rangeGaspBehavior in items:
+ data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
+ if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
+ version = 1
+ data = struct.pack(">HH", version, numRanges) + data
+ return data
- def toXML(self, writer, ttFont):
- items = sorted(self.gaspRange.items())
- for rangeMaxPPEM, rangeGaspBehavior in items:
- writer.simpletag("gaspRange", [
- ("rangeMaxPPEM", rangeMaxPPEM),
- ("rangeGaspBehavior", rangeGaspBehavior)])
- writer.newline()
+ def toXML(self, writer, ttFont):
+ items = sorted(self.gaspRange.items())
+ for rangeMaxPPEM, rangeGaspBehavior in items:
+ writer.simpletag(
+ "gaspRange",
+ [
+ ("rangeMaxPPEM", rangeMaxPPEM),
+ ("rangeGaspBehavior", rangeGaspBehavior),
+ ],
+ )
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if name != "gaspRange":
- return
- if not hasattr(self, "gaspRange"):
- self.gaspRange = {}
- self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"])
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "gaspRange":
+ return
+ if not hasattr(self, "gaspRange"):
+ self.gaspRange = {}
+ self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(
+ attrs["rangeGaspBehavior"]
+ )
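
A hedged construction sketch using the flag constants defined at the top of this module. Note that compile() recomputes the version on its own (see the "ignore self.version" comment) and bumps it to 1 as soon as any range uses flags beyond GASP_GRIDFIT | GASP_DOGRAY; the PPEM thresholds below are arbitrary sample values.

from fontTools.ttLib import newTable
from fontTools.ttLib.tables._g_a_s_p import (
    GASP_DOGRAY,
    GASP_GRIDFIT,
    GASP_SYMMETRIC_SMOOTHING,
)

gasp = newTable("gasp")
gasp.gaspRange = {
    8: GASP_DOGRAY,                                    # tiny sizes: grayscale only
    0xFFFF: GASP_GRIDFIT | GASP_DOGRAY | GASP_SYMMETRIC_SMOOTHING,
}
data = gasp.compile(None)   # the ttFont argument is unused by compile() above
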
diff --git a/Lib/fontTools/ttLib/tables/_g_l_y_f.py b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
index 745ef72b..bff0d92c 100644
--- a/Lib/fontTools/ttLib/tables/_g_l_y_f.py
+++ b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
@@ -4,16 +4,18 @@ from collections import namedtuple
from fontTools.misc import sstruct
from fontTools import ttLib
from fontTools import version
+from fontTools.misc.transform import DecomposedTransform
from fontTools.misc.textTools import tostr, safeEval, pad
-from fontTools.misc.arrayTools import calcIntBounds, pointInRect
+from fontTools.misc.arrayTools import updateBounds, pointInRect
from fontTools.misc.bezierTools import calcQuadraticBounds
from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi,
- floatToFixedToStr as fl2str,
- strToFixedToFloat as str2fl,
- otRound,
+ fixedToFloat as fi2fl,
+ floatToFixed as fl2fi,
+ floatToFixedToStr as fl2str,
+ strToFixedToFloat as str2fl,
)
+from fontTools.misc.roundTools import noRound, otRound
+from fontTools.misc.vector import Vector
from numbers import Number
from . import DefaultTable
from . import ttProgram
@@ -21,17 +23,22 @@ import sys
import struct
import array
import logging
+import math
import os
from fontTools.misc import xmlWriter
from fontTools.misc.filenames import userNameToFileName
from fontTools.misc.loggingTools import deprecateFunction
+from enum import IntFlag
+from functools import partial
+from types import SimpleNamespace
+from typing import Set
log = logging.getLogger(__name__)
# We compute the version the same as is computed in ttlib/__init__
# so that we can write 'ttLibVersion' attribute of the glyf TTX files
# when glyf is written to separate files.
-version = ".".join(version.split('.')[:2])
+version = ".".join(version.split(".")[:2])
#
# The Apple and MS rasterizers behave differently for
@@ -43,459 +50,526 @@ version = ".".join(version.split('.')[:2])
# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE
# (eg. Charcoal)...
#
-SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple
+SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple
class table__g_l_y_f(DefaultTable.DefaultTable):
- """Glyph Data Table
-
- This class represents the `glyf <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf>`_
- table, which contains outlines for glyphs in TrueType format. In many cases,
- it is easier to access and manipulate glyph outlines through the ``GlyphSet``
- object returned from :py:meth:`fontTools.ttLib.ttFont.getGlyphSet`::
-
- >> from fontTools.pens.boundsPen import BoundsPen
- >> glyphset = font.getGlyphSet()
- >> bp = BoundsPen(glyphset)
- >> glyphset["A"].draw(bp)
- >> bp.bounds
- (19, 0, 633, 716)
-
- However, this class can be used for low-level access to the ``glyf`` table data.
- Objects of this class support dictionary-like access, mapping glyph names to
- :py:class:`Glyph` objects::
-
- >> glyf = font["glyf"]
- >> len(glyf["Aacute"].components)
- 2
-
- Note that when adding glyphs to the font via low-level access to the ``glyf``
- table, the new glyphs must also be added to the ``hmtx``/``vmtx`` table::
-
- >> font["glyf"]["divisionslash"] = Glyph()
- >> font["hmtx"]["divisionslash"] = (640, 0)
-
- """
-
- # this attribute controls the amount of padding applied to glyph data upon compile.
- # Glyph lenghts are aligned to multiples of the specified value.
- # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means
- # no padding, except for when padding would allow to use short loca offsets.
- padding = 1
-
- def decompile(self, data, ttFont):
- loca = ttFont['loca']
- pos = int(loca[0])
- nextPos = 0
- noname = 0
- self.glyphs = {}
- self.glyphOrder = glyphOrder = ttFont.getGlyphOrder()
- for i in range(0, len(loca)-1):
- try:
- glyphName = glyphOrder[i]
- except IndexError:
- noname = noname + 1
- glyphName = 'ttxautoglyph%s' % i
- nextPos = int(loca[i+1])
- glyphdata = data[pos:nextPos]
- if len(glyphdata) != (nextPos - pos):
- raise ttLib.TTLibError("not enough 'glyf' table data")
- glyph = Glyph(glyphdata)
- self.glyphs[glyphName] = glyph
- pos = nextPos
- if len(data) - nextPos >= 4:
- log.warning(
- "too much 'glyf' table data: expected %d, received %d bytes",
- nextPos, len(data))
- if noname:
- log.warning('%s glyphs have no name', noname)
- if ttFont.lazy is False: # Be lazy for None and True
- self.ensureDecompiled()
-
- def ensureDecompiled(self, recurse=False):
- # The recurse argument is unused, but part of the signature of
- # ensureDecompiled across the library.
- for glyph in self.glyphs.values():
- glyph.expand(self)
-
- def compile(self, ttFont):
- if not hasattr(self, "glyphOrder"):
- self.glyphOrder = ttFont.getGlyphOrder()
- padding = self.padding
- assert padding in (0, 1, 2, 4)
- locations = []
- currentLocation = 0
- dataList = []
- recalcBBoxes = ttFont.recalcBBoxes
- for glyphName in self.glyphOrder:
- glyph = self.glyphs[glyphName]
- glyphData = glyph.compile(self, recalcBBoxes)
- if padding > 1:
- glyphData = pad(glyphData, size=padding)
- locations.append(currentLocation)
- currentLocation = currentLocation + len(glyphData)
- dataList.append(glyphData)
- locations.append(currentLocation)
-
- if padding == 1 and currentLocation < 0x20000:
- # See if we can pad any odd-lengthed glyphs to allow loca
- # table to use the short offsets.
- indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1]
- if indices and currentLocation + len(indices) < 0x20000:
- # It fits. Do it.
- for i in indices:
- dataList[i] += b'\0'
- currentLocation = 0
- for i,glyphData in enumerate(dataList):
- locations[i] = currentLocation
- currentLocation += len(glyphData)
- locations[len(dataList)] = currentLocation
-
- data = b''.join(dataList)
- if 'loca' in ttFont:
- ttFont['loca'].set(locations)
- if 'maxp' in ttFont:
- ttFont['maxp'].numGlyphs = len(self.glyphs)
- if not data:
- # As a special case when all glyph in the font are empty, add a zero byte
- # to the table, so that OTS doesn’t reject it, and to make the table work
- # on Windows as well.
- # See https://github.com/khaledhosny/ots/issues/52
- data = b"\0"
- return data
-
- def toXML(self, writer, ttFont, splitGlyphs=False):
- notice = (
- "The xMin, yMin, xMax and yMax values\n"
- "will be recalculated by the compiler.")
- glyphNames = ttFont.getGlyphNames()
- if not splitGlyphs:
- writer.newline()
- writer.comment(notice)
- writer.newline()
- writer.newline()
- numGlyphs = len(glyphNames)
- if splitGlyphs:
- path, ext = os.path.splitext(writer.file.name)
- existingGlyphFiles = set()
- for glyphName in glyphNames:
- glyph = self.get(glyphName)
- if glyph is None:
- log.warning("glyph '%s' does not exist in glyf table", glyphName)
- continue
- if glyph.numberOfContours:
- if splitGlyphs:
- glyphPath = userNameToFileName(
- tostr(glyphName, 'utf-8'),
- existingGlyphFiles,
- prefix=path + ".",
- suffix=ext)
- existingGlyphFiles.add(glyphPath.lower())
- glyphWriter = xmlWriter.XMLWriter(
- glyphPath, idlefunc=writer.idlefunc,
- newlinestr=writer.newlinestr)
- glyphWriter.begintag("ttFont", ttLibVersion=version)
- glyphWriter.newline()
- glyphWriter.begintag("glyf")
- glyphWriter.newline()
- glyphWriter.comment(notice)
- glyphWriter.newline()
- writer.simpletag("TTGlyph", src=os.path.basename(glyphPath))
- else:
- glyphWriter = writer
- glyphWriter.begintag('TTGlyph', [
- ("name", glyphName),
- ("xMin", glyph.xMin),
- ("yMin", glyph.yMin),
- ("xMax", glyph.xMax),
- ("yMax", glyph.yMax),
- ])
- glyphWriter.newline()
- glyph.toXML(glyphWriter, ttFont)
- glyphWriter.endtag('TTGlyph')
- glyphWriter.newline()
- if splitGlyphs:
- glyphWriter.endtag("glyf")
- glyphWriter.newline()
- glyphWriter.endtag("ttFont")
- glyphWriter.newline()
- glyphWriter.close()
- else:
- writer.simpletag('TTGlyph', name=glyphName)
- writer.comment("contains no outline data")
- if not splitGlyphs:
- writer.newline()
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name != "TTGlyph":
- return
- if not hasattr(self, "glyphs"):
- self.glyphs = {}
- if not hasattr(self, "glyphOrder"):
- self.glyphOrder = ttFont.getGlyphOrder()
- glyphName = attrs["name"]
- log.debug("unpacking glyph '%s'", glyphName)
- glyph = Glyph()
- for attr in ['xMin', 'yMin', 'xMax', 'yMax']:
- setattr(glyph, attr, safeEval(attrs.get(attr, '0')))
- self.glyphs[glyphName] = glyph
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- glyph.fromXML(name, attrs, content, ttFont)
- if not ttFont.recalcBBoxes:
- glyph.compact(self, 0)
-
- def setGlyphOrder(self, glyphOrder):
- """Sets the glyph order
-
- Args:
- glyphOrder ([str]): List of glyph names in order.
- """
- self.glyphOrder = glyphOrder
-
- def getGlyphName(self, glyphID):
- """Returns the name for the glyph with the given ID.
-
- Raises a ``KeyError`` if the glyph name is not found in the font.
- """
- return self.glyphOrder[glyphID]
-
- def getGlyphID(self, glyphName):
- """Returns the ID of the glyph with the given name.
-
- Raises a ``ValueError`` if the glyph is not found in the font.
- """
- # XXX optimize with reverse dict!!!
- return self.glyphOrder.index(glyphName)
-
- def removeHinting(self):
- """Removes TrueType hints from all glyphs in the glyphset.
-
- See :py:meth:`Glyph.removeHinting`.
- """
- for glyph in self.glyphs.values():
- glyph.removeHinting()
-
- def keys(self):
- return self.glyphs.keys()
-
- def has_key(self, glyphName):
- return glyphName in self.glyphs
-
- __contains__ = has_key
-
- def get(self, glyphName, default=None):
- glyph = self.glyphs.get(glyphName, default)
- if glyph is not None:
- glyph.expand(self)
- return glyph
-
- def __getitem__(self, glyphName):
- glyph = self.glyphs[glyphName]
- glyph.expand(self)
- return glyph
-
- def __setitem__(self, glyphName, glyph):
- self.glyphs[glyphName] = glyph
- if glyphName not in self.glyphOrder:
- self.glyphOrder.append(glyphName)
-
- def __delitem__(self, glyphName):
- del self.glyphs[glyphName]
- self.glyphOrder.remove(glyphName)
-
- def __len__(self):
- assert len(self.glyphOrder) == len(self.glyphs)
- return len(self.glyphs)
-
- def _getPhantomPoints(self, glyphName, hMetrics, vMetrics=None):
- """Compute the four "phantom points" for the given glyph from its bounding box
- and the horizontal and vertical advance widths and sidebearings stored in the
- ttFont's "hmtx" and "vmtx" tables.
-
- 'hMetrics' should be ttFont['hmtx'].metrics.
-
- 'vMetrics' should be ttFont['vmtx'].metrics if there is "vmtx" or None otherwise.
- If there is no vMetrics passed in, vertical phantom points are set to the zero coordinate.
-
- https://docs.microsoft.com/en-us/typography/opentype/spec/tt_instructing_glyphs#phantoms
- """
- glyph = self[glyphName]
- if not hasattr(glyph, 'xMin'):
- glyph.recalcBounds(self)
-
- horizontalAdvanceWidth, leftSideBearing = hMetrics[glyphName]
- leftSideX = glyph.xMin - leftSideBearing
- rightSideX = leftSideX + horizontalAdvanceWidth
-
- if vMetrics:
- verticalAdvanceWidth, topSideBearing = vMetrics[glyphName]
- topSideY = topSideBearing + glyph.yMax
- bottomSideY = topSideY - verticalAdvanceWidth
- else:
- bottomSideY = topSideY = 0
-
- return [
- (leftSideX, 0),
- (rightSideX, 0),
- (0, topSideY),
- (0, bottomSideY),
- ]
-
- def _getCoordinatesAndControls(self, glyphName, hMetrics, vMetrics=None):
- """Return glyph coordinates and controls as expected by "gvar" table.
-
- The coordinates includes four "phantom points" for the glyph metrics,
- as mandated by the "gvar" spec.
-
- The glyph controls is a namedtuple with the following attributes:
- - numberOfContours: -1 for composite glyphs.
- - endPts: list of indices of end points for each contour in simple
- glyphs, or component indices in composite glyphs (used for IUP
- optimization).
- - flags: array of contour point flags for simple glyphs (None for
- composite glyphs).
- - components: list of base glyph names (str) for each component in
- composite glyphs (None for simple glyphs).
-
- The "hMetrics" and vMetrics are used to compute the "phantom points" (see
- the "_getPhantomPoints" method).
-
- Return None if the requested glyphName is not present.
- """
- glyph = self.get(glyphName)
- if glyph is None:
- return None
- if glyph.isComposite():
- coords = GlyphCoordinates(
- [(getattr(c, 'x', 0), getattr(c, 'y', 0)) for c in glyph.components]
- )
- controls = _GlyphControls(
- numberOfContours=glyph.numberOfContours,
- endPts=list(range(len(glyph.components))),
- flags=None,
- components=[c.glyphName for c in glyph.components],
- )
- else:
- coords, endPts, flags = glyph.getCoordinates(self)
- coords = coords.copy()
- controls = _GlyphControls(
- numberOfContours=glyph.numberOfContours,
- endPts=endPts,
- flags=flags,
- components=None,
- )
- # Add phantom points for (left, right, top, bottom) positions.
- phantomPoints = self._getPhantomPoints(glyphName, hMetrics, vMetrics)
- coords.extend(phantomPoints)
- return coords, controls
-
- def _setCoordinates(self, glyphName, coord, hMetrics, vMetrics=None):
- """Set coordinates and metrics for the given glyph.
-
- "coord" is an array of GlyphCoordinates which must include the "phantom
- points" as the last four coordinates.
-
- Both the horizontal/vertical advances and left/top sidebearings in "hmtx"
- and "vmtx" tables (if any) are updated from four phantom points and
- the glyph's bounding boxes.
-
- The "hMetrics" and vMetrics are used to propagate "phantom points"
- into "hmtx" and "vmtx" tables if desired. (see the "_getPhantomPoints"
- method).
- """
- glyph = self[glyphName]
-
- # Handle phantom points for (left, right, top, bottom) positions.
- assert len(coord) >= 4
- leftSideX = coord[-4][0]
- rightSideX = coord[-3][0]
- topSideY = coord[-2][1]
- bottomSideY = coord[-1][1]
-
- coord = coord[:-4]
-
- if glyph.isComposite():
- assert len(coord) == len(glyph.components)
- for p, comp in zip(coord, glyph.components):
- if hasattr(comp, 'x'):
- comp.x, comp.y = p
- elif glyph.numberOfContours == 0:
- assert len(coord) == 0
- else:
- assert len(coord) == len(glyph.coordinates)
- glyph.coordinates = GlyphCoordinates(coord)
-
- glyph.recalcBounds(self)
-
- horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
- if horizontalAdvanceWidth < 0:
- # unlikely, but it can happen, see:
- # https://github.com/fonttools/fonttools/pull/1198
- horizontalAdvanceWidth = 0
- leftSideBearing = otRound(glyph.xMin - leftSideX)
- hMetrics[glyphName] = horizontalAdvanceWidth, leftSideBearing
-
- if vMetrics is not None:
- verticalAdvanceWidth = otRound(topSideY - bottomSideY)
- if verticalAdvanceWidth < 0: # unlikely but do the same as horizontal
- verticalAdvanceWidth = 0
- topSideBearing = otRound(topSideY - glyph.yMax)
- vMetrics[glyphName] = verticalAdvanceWidth, topSideBearing
-
-
- # Deprecated
-
- def _synthesizeVMetrics(self, glyphName, ttFont, defaultVerticalOrigin):
- """This method is wrong and deprecated.
- For rationale see:
- https://github.com/fonttools/fonttools/pull/2266/files#r613569473
- """
- vMetrics = getattr(ttFont.get('vmtx'), 'metrics', None)
- if vMetrics is None:
- verticalAdvanceWidth = ttFont["head"].unitsPerEm
- topSideY = getattr(ttFont.get('hhea'), 'ascent', None)
- if topSideY is None:
- if defaultVerticalOrigin is not None:
- topSideY = defaultVerticalOrigin
- else:
- topSideY = verticalAdvanceWidth
- glyph = self[glyphName]
- glyph.recalcBounds(self)
- topSideBearing = otRound(topSideY - glyph.yMax)
- vMetrics = {glyphName: (verticalAdvanceWidth, topSideBearing)}
- return vMetrics
-
- @deprecateFunction("use '_getPhantomPoints' instead", category=DeprecationWarning)
- def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None):
- """Old public name for self._getPhantomPoints().
- See: https://github.com/fonttools/fonttools/pull/2266"""
- hMetrics = ttFont['hmtx'].metrics
- vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
- return self._getPhantomPoints(glyphName, hMetrics, vMetrics)
-
- @deprecateFunction("use '_getCoordinatesAndControls' instead", category=DeprecationWarning)
- def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None):
- """Old public name for self._getCoordinatesAndControls().
- See: https://github.com/fonttools/fonttools/pull/2266"""
- hMetrics = ttFont['hmtx'].metrics
- vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
- return self._getCoordinatesAndControls(glyphName, hMetrics, vMetrics)
-
- @deprecateFunction("use '_setCoordinates' instead", category=DeprecationWarning)
- def setCoordinates(self, glyphName, ttFont):
- """Old public name for self._setCoordinates().
- See: https://github.com/fonttools/fonttools/pull/2266"""
- hMetrics = ttFont['hmtx'].metrics
- vMetrics = getattr(ttFont.get('vmtx'), 'metrics', None)
- self._setCoordinates(glyphName, hMetrics, vMetrics)
+ """Glyph Data Table
+
+ This class represents the `glyf <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf>`_
+ table, which contains outlines for glyphs in TrueType format. In many cases,
+ it is easier to access and manipulate glyph outlines through the ``GlyphSet``
+ object returned from :py:meth:`fontTools.ttLib.ttFont.getGlyphSet`::
+
+ >> from fontTools.pens.boundsPen import BoundsPen
+ >> glyphset = font.getGlyphSet()
+ >> bp = BoundsPen(glyphset)
+ >> glyphset["A"].draw(bp)
+ >> bp.bounds
+ (19, 0, 633, 716)
+
+ However, this class can be used for low-level access to the ``glyf`` table data.
+ Objects of this class support dictionary-like access, mapping glyph names to
+ :py:class:`Glyph` objects::
+
+ >> glyf = font["glyf"]
+ >> len(glyf["Aacute"].components)
+ 2
+
+ Note that when adding glyphs to the font via low-level access to the ``glyf``
+ table, the new glyphs must also be added to the ``hmtx``/``vmtx`` table::
+
+ >> font["glyf"]["divisionslash"] = Glyph()
+ >> font["hmtx"]["divisionslash"] = (640, 0)
+
+ """
+
+ dependencies = ["fvar"]
+
+    # This attribute controls the amount of padding applied to glyph data upon compile.
+    # Glyph lengths are aligned to multiples of the specified value.
+    # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means
+    # no padding, except when padding would allow the 'loca' table to use short offsets.
+ padding = 1
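+    # A usage sketch (illustrative; ``font`` is an open TTFont as in the class
+    # docstring above): forcing 4-byte alignment of every glyph record before
+    # compiling the table would look like
+    #
+    #     font["glyf"].padding = 4
+    #     data = font["glyf"].compile(font)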
+
+ def decompile(self, data, ttFont):
+ self.axisTags = (
+ [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else []
+ )
+ loca = ttFont["loca"]
+ pos = int(loca[0])
+ nextPos = 0
+ noname = 0
+ self.glyphs = {}
+ self.glyphOrder = glyphOrder = ttFont.getGlyphOrder()
+ self._reverseGlyphOrder = {}
+ for i in range(0, len(loca) - 1):
+ try:
+ glyphName = glyphOrder[i]
+ except IndexError:
+ noname = noname + 1
+ glyphName = "ttxautoglyph%s" % i
+ nextPos = int(loca[i + 1])
+ glyphdata = data[pos:nextPos]
+ if len(glyphdata) != (nextPos - pos):
+ raise ttLib.TTLibError("not enough 'glyf' table data")
+ glyph = Glyph(glyphdata)
+ self.glyphs[glyphName] = glyph
+ pos = nextPos
+ if len(data) - nextPos >= 4:
+ log.warning(
+ "too much 'glyf' table data: expected %d, received %d bytes",
+ nextPos,
+ len(data),
+ )
+ if noname:
+ log.warning("%s glyphs have no name", noname)
+ if ttFont.lazy is False: # Be lazy for None and True
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ for glyph in self.glyphs.values():
+ glyph.expand(self)
+
+ def compile(self, ttFont):
+ self.axisTags = (
+ [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else []
+ )
+ if not hasattr(self, "glyphOrder"):
+ self.glyphOrder = ttFont.getGlyphOrder()
+ padding = self.padding
+ assert padding in (0, 1, 2, 4)
+ locations = []
+ currentLocation = 0
+ dataList = []
+ recalcBBoxes = ttFont.recalcBBoxes
+ boundsDone = set()
+ for glyphName in self.glyphOrder:
+ glyph = self.glyphs[glyphName]
+ glyphData = glyph.compile(self, recalcBBoxes, boundsDone=boundsDone)
+ if padding > 1:
+ glyphData = pad(glyphData, size=padding)
+ locations.append(currentLocation)
+ currentLocation = currentLocation + len(glyphData)
+ dataList.append(glyphData)
+ locations.append(currentLocation)
+
+ if padding == 1 and currentLocation < 0x20000:
+            # See if we can pad any odd-length glyphs to allow the loca
+            # table to use short offsets.
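+            # (The short 'loca' format stores each offset divided by two in a
+            # uint16, so offsets must be even and the total glyf length must
+            # stay below 0x20000 for the short format to be usable.)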
+ indices = [
+ i for i, glyphData in enumerate(dataList) if len(glyphData) % 2 == 1
+ ]
+ if indices and currentLocation + len(indices) < 0x20000:
+ # It fits. Do it.
+ for i in indices:
+ dataList[i] += b"\0"
+ currentLocation = 0
+ for i, glyphData in enumerate(dataList):
+ locations[i] = currentLocation
+ currentLocation += len(glyphData)
+ locations[len(dataList)] = currentLocation
+
+ data = b"".join(dataList)
+ if "loca" in ttFont:
+ ttFont["loca"].set(locations)
+ if "maxp" in ttFont:
+ ttFont["maxp"].numGlyphs = len(self.glyphs)
+ if not data:
+            # As a special case when all glyphs in the font are empty, add a zero byte
+ # to the table, so that OTS doesn’t reject it, and to make the table work
+ # on Windows as well.
+ # See https://github.com/khaledhosny/ots/issues/52
+ data = b"\0"
+ return data
+
+ def toXML(self, writer, ttFont, splitGlyphs=False):
+ notice = (
+ "The xMin, yMin, xMax and yMax values\n"
+ "will be recalculated by the compiler."
+ )
+ glyphNames = ttFont.getGlyphNames()
+ if not splitGlyphs:
+ writer.newline()
+ writer.comment(notice)
+ writer.newline()
+ writer.newline()
+ numGlyphs = len(glyphNames)
+ if splitGlyphs:
+ path, ext = os.path.splitext(writer.file.name)
+ existingGlyphFiles = set()
+ for glyphName in glyphNames:
+ glyph = self.get(glyphName)
+ if glyph is None:
+ log.warning("glyph '%s' does not exist in glyf table", glyphName)
+ continue
+ if glyph.numberOfContours:
+ if splitGlyphs:
+ glyphPath = userNameToFileName(
+ tostr(glyphName, "utf-8"),
+ existingGlyphFiles,
+ prefix=path + ".",
+ suffix=ext,
+ )
+ existingGlyphFiles.add(glyphPath.lower())
+ glyphWriter = xmlWriter.XMLWriter(
+ glyphPath,
+ idlefunc=writer.idlefunc,
+ newlinestr=writer.newlinestr,
+ )
+ glyphWriter.begintag("ttFont", ttLibVersion=version)
+ glyphWriter.newline()
+ glyphWriter.begintag("glyf")
+ glyphWriter.newline()
+ glyphWriter.comment(notice)
+ glyphWriter.newline()
+ writer.simpletag("TTGlyph", src=os.path.basename(glyphPath))
+ else:
+ glyphWriter = writer
+ glyphWriter.begintag(
+ "TTGlyph",
+ [
+ ("name", glyphName),
+ ("xMin", glyph.xMin),
+ ("yMin", glyph.yMin),
+ ("xMax", glyph.xMax),
+ ("yMax", glyph.yMax),
+ ],
+ )
+ glyphWriter.newline()
+ glyph.toXML(glyphWriter, ttFont)
+ glyphWriter.endtag("TTGlyph")
+ glyphWriter.newline()
+ if splitGlyphs:
+ glyphWriter.endtag("glyf")
+ glyphWriter.newline()
+ glyphWriter.endtag("ttFont")
+ glyphWriter.newline()
+ glyphWriter.close()
+ else:
+ writer.simpletag("TTGlyph", name=glyphName)
+ writer.comment("contains no outline data")
+ if not splitGlyphs:
+ writer.newline()
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "TTGlyph":
+ return
+ if not hasattr(self, "glyphs"):
+ self.glyphs = {}
+ if not hasattr(self, "glyphOrder"):
+ self.glyphOrder = ttFont.getGlyphOrder()
+ glyphName = attrs["name"]
+ log.debug("unpacking glyph '%s'", glyphName)
+ glyph = Glyph()
+ for attr in ["xMin", "yMin", "xMax", "yMax"]:
+ setattr(glyph, attr, safeEval(attrs.get(attr, "0")))
+ self.glyphs[glyphName] = glyph
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ glyph.fromXML(name, attrs, content, ttFont)
+ if not ttFont.recalcBBoxes:
+ glyph.compact(self, 0)
+
+ def setGlyphOrder(self, glyphOrder):
+ """Sets the glyph order
+
+ Args:
+ glyphOrder ([str]): List of glyph names in order.
+ """
+ self.glyphOrder = glyphOrder
+ self._reverseGlyphOrder = {}
+
+ def getGlyphName(self, glyphID):
+ """Returns the name for the glyph with the given ID.
+
+ Raises a ``KeyError`` if the glyph name is not found in the font.
+ """
+ return self.glyphOrder[glyphID]
+
+ def _buildReverseGlyphOrderDict(self):
+ self._reverseGlyphOrder = d = {}
+ for glyphID, glyphName in enumerate(self.glyphOrder):
+ d[glyphName] = glyphID
+
+ def getGlyphID(self, glyphName):
+ """Returns the ID of the glyph with the given name.
+
+ Raises a ``ValueError`` if the glyph is not found in the font.
+ """
+ glyphOrder = self.glyphOrder
+ id = getattr(self, "_reverseGlyphOrder", {}).get(glyphName)
+ if id is None or id >= len(glyphOrder) or glyphOrder[id] != glyphName:
+ self._buildReverseGlyphOrderDict()
+ id = self._reverseGlyphOrder.get(glyphName)
+ if id is None:
+ raise ValueError(glyphName)
+ return id
+
+ def removeHinting(self):
+ """Removes TrueType hints from all glyphs in the glyphset.
+
+ See :py:meth:`Glyph.removeHinting`.
+ """
+ for glyph in self.glyphs.values():
+ glyph.removeHinting()
+
+ def keys(self):
+ return self.glyphs.keys()
+
+ def has_key(self, glyphName):
+ return glyphName in self.glyphs
+
+ __contains__ = has_key
+
+ def get(self, glyphName, default=None):
+ glyph = self.glyphs.get(glyphName, default)
+ if glyph is not None:
+ glyph.expand(self)
+ return glyph
+
+ def __getitem__(self, glyphName):
+ glyph = self.glyphs[glyphName]
+ glyph.expand(self)
+ return glyph
+
+ def __setitem__(self, glyphName, glyph):
+ self.glyphs[glyphName] = glyph
+ if glyphName not in self.glyphOrder:
+ self.glyphOrder.append(glyphName)
+
+ def __delitem__(self, glyphName):
+ del self.glyphs[glyphName]
+ self.glyphOrder.remove(glyphName)
+
+ def __len__(self):
+ assert len(self.glyphOrder) == len(self.glyphs)
+ return len(self.glyphs)
+
+ def _getPhantomPoints(self, glyphName, hMetrics, vMetrics=None):
+ """Compute the four "phantom points" for the given glyph from its bounding box
+ and the horizontal and vertical advance widths and sidebearings stored in the
+ ttFont's "hmtx" and "vmtx" tables.
+
+ 'hMetrics' should be ttFont['hmtx'].metrics.
+
+ 'vMetrics' should be ttFont['vmtx'].metrics if there is "vmtx" or None otherwise.
+ If there is no vMetrics passed in, vertical phantom points are set to the zero coordinate.
+
+ https://docs.microsoft.com/en-us/typography/opentype/spec/tt_instructing_glyphs#phantoms
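+
+        A minimal usage sketch (the returned numbers are hypothetical)::
+
+            >> hMetrics = font["hmtx"].metrics
+            >> vMetrics = getattr(font.get("vmtx"), "metrics", None)
+            >> font["glyf"]._getPhantomPoints("A", hMetrics, vMetrics)
+            [(-10, 0), (590, 0), (0, 730), (0, -270)]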
+ """
+ glyph = self[glyphName]
+ if not hasattr(glyph, "xMin"):
+ glyph.recalcBounds(self)
+
+ horizontalAdvanceWidth, leftSideBearing = hMetrics[glyphName]
+ leftSideX = glyph.xMin - leftSideBearing
+ rightSideX = leftSideX + horizontalAdvanceWidth
+
+ if vMetrics:
+ verticalAdvanceWidth, topSideBearing = vMetrics[glyphName]
+ topSideY = topSideBearing + glyph.yMax
+ bottomSideY = topSideY - verticalAdvanceWidth
+ else:
+ bottomSideY = topSideY = 0
+
+ return [
+ (leftSideX, 0),
+ (rightSideX, 0),
+ (0, topSideY),
+ (0, bottomSideY),
+ ]
+
+ def _getCoordinatesAndControls(
+ self, glyphName, hMetrics, vMetrics=None, *, round=otRound
+ ):
+ """Return glyph coordinates and controls as expected by "gvar" table.
+
+        The coordinates include four "phantom points" for the glyph metrics,
+ as mandated by the "gvar" spec.
+
+        The glyph controls object is a namedtuple with the following attributes:
+ - numberOfContours: -1 for composite glyphs.
+ - endPts: list of indices of end points for each contour in simple
+ glyphs, or component indices in composite glyphs (used for IUP
+ optimization).
+ - flags: array of contour point flags for simple glyphs (None for
+ composite glyphs).
+          - components: list of (glyphName, transform) tuples for each component
+            in composite glyphs, or (glyphName, flags) tuples for variable
+            components (None for simple glyphs).
+
+ The "hMetrics" and vMetrics are used to compute the "phantom points" (see
+ the "_getPhantomPoints" method).
+
+ Return None if the requested glyphName is not present.
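+
+        A usage sketch (the glyph name and the printed value are illustrative)::
+
+            >> hMetrics = font["hmtx"].metrics
+            >> coords, controls = font["glyf"]._getCoordinatesAndControls("A", hMetrics)
+            >> controls.numberOfContours
+            2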
+ """
+ glyph = self.get(glyphName)
+ if glyph is None:
+ return None
+ if glyph.isComposite():
+ coords = GlyphCoordinates(
+ [(getattr(c, "x", 0), getattr(c, "y", 0)) for c in glyph.components]
+ )
+ controls = _GlyphControls(
+ numberOfContours=glyph.numberOfContours,
+ endPts=list(range(len(glyph.components))),
+ flags=None,
+ components=[
+ (c.glyphName, getattr(c, "transform", None))
+ for c in glyph.components
+ ],
+ )
+ elif glyph.isVarComposite():
+ coords = []
+ controls = []
+
+ for component in glyph.components:
+ (
+ componentCoords,
+ componentControls,
+ ) = component.getCoordinatesAndControls()
+ coords.extend(componentCoords)
+ controls.extend(componentControls)
+
+ coords = GlyphCoordinates(coords)
+
+ controls = _GlyphControls(
+ numberOfContours=glyph.numberOfContours,
+ endPts=list(range(len(coords))),
+ flags=None,
+ components=[
+ (c.glyphName, getattr(c, "flags", None)) for c in glyph.components
+ ],
+ )
+
+ else:
+ coords, endPts, flags = glyph.getCoordinates(self)
+ coords = coords.copy()
+ controls = _GlyphControls(
+ numberOfContours=glyph.numberOfContours,
+ endPts=endPts,
+ flags=flags,
+ components=None,
+ )
+ # Add phantom points for (left, right, top, bottom) positions.
+ phantomPoints = self._getPhantomPoints(glyphName, hMetrics, vMetrics)
+ coords.extend(phantomPoints)
+ coords.toInt(round=round)
+ return coords, controls
+
+ def _setCoordinates(self, glyphName, coord, hMetrics, vMetrics=None):
+ """Set coordinates and metrics for the given glyph.
+
+ "coord" is an array of GlyphCoordinates which must include the "phantom
+ points" as the last four coordinates.
+
+ Both the horizontal/vertical advances and left/top sidebearings in "hmtx"
+ and "vmtx" tables (if any) are updated from four phantom points and
+ the glyph's bounding boxes.
+
+ The "hMetrics" and vMetrics are used to propagate "phantom points"
+ into "hmtx" and "vmtx" tables if desired. (see the "_getPhantomPoints"
+ method).
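+
+        A round-trip sketch (assumes the glyph "A" exists in the font)::
+
+            >> glyf, hMetrics = font["glyf"], font["hmtx"].metrics
+            >> coords, _ = glyf._getCoordinatesAndControls("A", hMetrics)
+            >> glyf._setCoordinates("A", coords, hMetrics)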
+ """
+ glyph = self[glyphName]
+
+ # Handle phantom points for (left, right, top, bottom) positions.
+ assert len(coord) >= 4
+ leftSideX = coord[-4][0]
+ rightSideX = coord[-3][0]
+ topSideY = coord[-2][1]
+ bottomSideY = coord[-1][1]
+
+ coord = coord[:-4]
+
+ if glyph.isComposite():
+ assert len(coord) == len(glyph.components)
+ for p, comp in zip(coord, glyph.components):
+ if hasattr(comp, "x"):
+ comp.x, comp.y = p
+ elif glyph.isVarComposite():
+ for comp in glyph.components:
+ coord = comp.setCoordinates(coord)
+ assert not coord
+ elif glyph.numberOfContours == 0:
+ assert len(coord) == 0
+ else:
+ assert len(coord) == len(glyph.coordinates)
+ glyph.coordinates = GlyphCoordinates(coord)
+
+ glyph.recalcBounds(self, boundsDone=set())
+
+ horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
+ if horizontalAdvanceWidth < 0:
+ # unlikely, but it can happen, see:
+ # https://github.com/fonttools/fonttools/pull/1198
+ horizontalAdvanceWidth = 0
+ leftSideBearing = otRound(glyph.xMin - leftSideX)
+ hMetrics[glyphName] = horizontalAdvanceWidth, leftSideBearing
+
+ if vMetrics is not None:
+ verticalAdvanceWidth = otRound(topSideY - bottomSideY)
+ if verticalAdvanceWidth < 0: # unlikely but do the same as horizontal
+ verticalAdvanceWidth = 0
+ topSideBearing = otRound(topSideY - glyph.yMax)
+ vMetrics[glyphName] = verticalAdvanceWidth, topSideBearing
+
+ # Deprecated
+
+ def _synthesizeVMetrics(self, glyphName, ttFont, defaultVerticalOrigin):
+ """This method is wrong and deprecated.
+ For rationale see:
+ https://github.com/fonttools/fonttools/pull/2266/files#r613569473
+ """
+ vMetrics = getattr(ttFont.get("vmtx"), "metrics", None)
+ if vMetrics is None:
+ verticalAdvanceWidth = ttFont["head"].unitsPerEm
+ topSideY = getattr(ttFont.get("hhea"), "ascent", None)
+ if topSideY is None:
+ if defaultVerticalOrigin is not None:
+ topSideY = defaultVerticalOrigin
+ else:
+ topSideY = verticalAdvanceWidth
+ glyph = self[glyphName]
+ glyph.recalcBounds(self)
+ topSideBearing = otRound(topSideY - glyph.yMax)
+ vMetrics = {glyphName: (verticalAdvanceWidth, topSideBearing)}
+ return vMetrics
+
+ @deprecateFunction("use '_getPhantomPoints' instead", category=DeprecationWarning)
+ def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ """Old public name for self._getPhantomPoints().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont["hmtx"].metrics
+ vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
+ return self._getPhantomPoints(glyphName, hMetrics, vMetrics)
+
+ @deprecateFunction(
+ "use '_getCoordinatesAndControls' instead", category=DeprecationWarning
+ )
+ def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ """Old public name for self._getCoordinatesAndControls().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont["hmtx"].metrics
+ vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
+ return self._getCoordinatesAndControls(glyphName, hMetrics, vMetrics)
+
+ @deprecateFunction("use '_setCoordinates' instead", category=DeprecationWarning)
+ def setCoordinates(self, glyphName, ttFont):
+ """Old public name for self._setCoordinates().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont["hmtx"].metrics
+ vMetrics = getattr(ttFont.get("vmtx"), "metrics", None)
+ self._setCoordinates(glyphName, hMetrics, vMetrics)
_GlyphControls = namedtuple(
- "_GlyphControls", "numberOfContours endPts flags components"
+ "_GlyphControls", "numberOfContours endPts flags components"
)
@@ -513,1338 +587,2094 @@ flagOnCurve = 0x01
flagXShort = 0x02
flagYShort = 0x04
flagRepeat = 0x08
-flagXsame = 0x10
+flagXsame = 0x10
flagYsame = 0x20
flagOverlapSimple = 0x40
-flagReserved = 0x80
+flagCubic = 0x80
# These flags are kept for XML output after decompiling the coordinates
-keepFlags = flagOnCurve + flagOverlapSimple
+keepFlags = flagOnCurve + flagOverlapSimple + flagCubic
_flagSignBytes = {
- 0: 2,
- flagXsame: 0,
- flagXShort|flagXsame: +1,
- flagXShort: -1,
- flagYsame: 0,
- flagYShort|flagYsame: +1,
- flagYShort: -1,
+ 0: 2,
+ flagXsame: 0,
+ flagXShort | flagXsame: +1,
+ flagXShort: -1,
+ flagYsame: 0,
+ flagYShort | flagYsame: +1,
+ flagYShort: -1,
}
+
def flagBest(x, y, onCurve):
- """For a given x,y delta pair, returns the flag that packs this pair
- most efficiently, as well as the number of byte cost of such flag."""
-
- flag = flagOnCurve if onCurve else 0
- cost = 0
- # do x
- if x == 0:
- flag = flag | flagXsame
- elif -255 <= x <= 255:
- flag = flag | flagXShort
- if x > 0:
- flag = flag | flagXsame
- cost += 1
- else:
- cost += 2
- # do y
- if y == 0:
- flag = flag | flagYsame
- elif -255 <= y <= 255:
- flag = flag | flagYShort
- if y > 0:
- flag = flag | flagYsame
- cost += 1
- else:
- cost += 2
- return flag, cost
+ """For a given x,y delta pair, returns the flag that packs this pair
+ most efficiently, as well as the number of byte cost of such flag."""
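+    # For example, flagBest(0, 10, True) yields
+    # (flagOnCurve | flagXsame | flagYShort | flagYsame, 1): the zero x delta
+    # costs no bytes and the y delta fits in a single unsigned byte.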
+
+ flag = flagOnCurve if onCurve else 0
+ cost = 0
+ # do x
+ if x == 0:
+ flag = flag | flagXsame
+ elif -255 <= x <= 255:
+ flag = flag | flagXShort
+ if x > 0:
+ flag = flag | flagXsame
+ cost += 1
+ else:
+ cost += 2
+ # do y
+ if y == 0:
+ flag = flag | flagYsame
+ elif -255 <= y <= 255:
+ flag = flag | flagYShort
+ if y > 0:
+ flag = flag | flagYsame
+ cost += 1
+ else:
+ cost += 2
+ return flag, cost
+
def flagFits(newFlag, oldFlag, mask):
- newBytes = _flagSignBytes[newFlag & mask]
- oldBytes = _flagSignBytes[oldFlag & mask]
- return newBytes == oldBytes or abs(newBytes) > abs(oldBytes)
+ newBytes = _flagSignBytes[newFlag & mask]
+ oldBytes = _flagSignBytes[oldFlag & mask]
+ return newBytes == oldBytes or abs(newBytes) > abs(oldBytes)
+
def flagSupports(newFlag, oldFlag):
- return ((oldFlag & flagOnCurve) == (newFlag & flagOnCurve) and
- flagFits(newFlag, oldFlag, flagXsame|flagXShort) and
- flagFits(newFlag, oldFlag, flagYsame|flagYShort))
+ return (
+ (oldFlag & flagOnCurve) == (newFlag & flagOnCurve)
+ and flagFits(newFlag, oldFlag, flagXsame | flagXShort)
+ and flagFits(newFlag, oldFlag, flagYsame | flagYShort)
+ )
+
def flagEncodeCoord(flag, mask, coord, coordBytes):
- byteCount = _flagSignBytes[flag & mask]
- if byteCount == 1:
- coordBytes.append(coord)
- elif byteCount == -1:
- coordBytes.append(-coord)
- elif byteCount == 2:
- coordBytes.extend(struct.pack('>h', coord))
+ byteCount = _flagSignBytes[flag & mask]
+ if byteCount == 1:
+ coordBytes.append(coord)
+ elif byteCount == -1:
+ coordBytes.append(-coord)
+ elif byteCount == 2:
+ coordBytes.extend(struct.pack(">h", coord))
+
def flagEncodeCoords(flag, x, y, xBytes, yBytes):
- flagEncodeCoord(flag, flagXsame|flagXShort, x, xBytes)
- flagEncodeCoord(flag, flagYsame|flagYShort, y, yBytes)
+ flagEncodeCoord(flag, flagXsame | flagXShort, x, xBytes)
+ flagEncodeCoord(flag, flagYsame | flagYShort, y, yBytes)
+
+
+ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes
+ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points
+ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true
+WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0
+NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!)
+MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one
+WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy
+WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11
+WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow
+USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph
+OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts
+SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple)
+UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS)
+
+
+CompositeMaxpValues = namedtuple(
+ "CompositeMaxpValues", ["nPoints", "nContours", "maxComponentDepth"]
+)
-ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes
-ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points
-ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true
-WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0
-NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!)
-MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one
-WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy
-WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11
-WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow
-USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph
-OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts
-SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple)
-UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS)
+class Glyph(object):
+ """This class represents an individual TrueType glyph.
+
+ TrueType glyph objects come in two flavours: simple and composite. Simple
+ glyph objects contain contours, represented via the ``.coordinates``,
+ ``.flags``, ``.numberOfContours``, and ``.endPtsOfContours`` attributes;
+ composite glyphs contain components, available through the ``.components``
+ attributes.
+
+ Because the ``.coordinates`` attribute (and other simple glyph attributes mentioned
+ above) is only set on simple glyphs and the ``.components`` attribute is only
+ set on composite glyphs, it is necessary to use the :py:meth:`isComposite`
+ method to test whether a glyph is simple or composite before attempting to
+ access its data.
+
+ For a composite glyph, the components can also be accessed via array-like access::
+
+ >> assert(font["glyf"]["Aacute"].isComposite())
+ >> font["glyf"]["Aacute"][0]
+ <fontTools.ttLib.tables._g_l_y_f.GlyphComponent at 0x1027b2ee0>
+
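+    For a simple glyph, the outline data can be reached directly (a sketch;
+    the glyph name and the printed values depend on the font)::
+
+        >> glyph = font["glyf"]["H"]
+        >> glyph.isComposite()
+        False
+        >> glyph.numberOfContours
+        1
+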
+ """
+
+ def __init__(self, data=b""):
+ if not data:
+ # empty char
+ self.numberOfContours = 0
+ return
+ self.data = data
+
+ def compact(self, glyfTable, recalcBBoxes=True):
+ data = self.compile(glyfTable, recalcBBoxes)
+ self.__dict__.clear()
+ self.data = data
+
+ def expand(self, glyfTable):
+ if not hasattr(self, "data"):
+ # already unpacked
+ return
+ if not self.data:
+ # empty char
+ del self.data
+ self.numberOfContours = 0
+ return
+ dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self)
+ del self.data
+ # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in
+ # some glyphs; decompileCoordinates assumes that there's at least
+ # one, so short-circuit here.
+ if self.numberOfContours == 0:
+ return
+ if self.isComposite():
+ self.decompileComponents(data, glyfTable)
+ elif self.isVarComposite():
+ self.decompileVarComponents(data, glyfTable)
+ else:
+ self.decompileCoordinates(data)
+
+ def compile(self, glyfTable, recalcBBoxes=True, *, boundsDone=None):
+ if hasattr(self, "data"):
+ if recalcBBoxes:
+ # must unpack glyph in order to recalculate bounding box
+ self.expand(glyfTable)
+ else:
+ return self.data
+ if self.numberOfContours == 0:
+ return b""
+
+ if recalcBBoxes:
+ self.recalcBounds(glyfTable, boundsDone=boundsDone)
+
+ data = sstruct.pack(glyphHeaderFormat, self)
+ if self.isComposite():
+ data = data + self.compileComponents(glyfTable)
+ elif self.isVarComposite():
+ data = data + self.compileVarComponents(glyfTable)
+ else:
+ data = data + self.compileCoordinates()
+ return data
+
+ def toXML(self, writer, ttFont):
+ if self.isComposite():
+ for compo in self.components:
+ compo.toXML(writer, ttFont)
+ haveInstructions = hasattr(self, "program")
+ elif self.isVarComposite():
+ for compo in self.components:
+ compo.toXML(writer, ttFont)
+ haveInstructions = False
+ else:
+ last = 0
+ for i in range(self.numberOfContours):
+ writer.begintag("contour")
+ writer.newline()
+ for j in range(last, self.endPtsOfContours[i] + 1):
+ attrs = [
+ ("x", self.coordinates[j][0]),
+ ("y", self.coordinates[j][1]),
+ ("on", self.flags[j] & flagOnCurve),
+ ]
+ if self.flags[j] & flagOverlapSimple:
+                        # Apple's rasterizer uses flagOverlapSimple in the first
+                        # contour/first pt to flag glyphs that contain overlapping
+                        # contours
+ attrs.append(("overlap", 1))
+ if self.flags[j] & flagCubic:
+ attrs.append(("cubic", 1))
+ writer.simpletag("pt", attrs)
+ writer.newline()
+ last = self.endPtsOfContours[i] + 1
+ writer.endtag("contour")
+ writer.newline()
+ haveInstructions = self.numberOfContours > 0
+ if haveInstructions:
+ if self.program:
+ writer.begintag("instructions")
+ writer.newline()
+ self.program.toXML(writer, ttFont)
+ writer.endtag("instructions")
+ else:
+ writer.simpletag("instructions")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "contour":
+ if self.numberOfContours < 0:
+ raise ttLib.TTLibError("can't mix composites and contours in glyph")
+ self.numberOfContours = self.numberOfContours + 1
+ coordinates = GlyphCoordinates()
+ flags = bytearray()
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "pt":
+ continue # ignore anything but "pt"
+ coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"])))
+ flag = bool(safeEval(attrs["on"]))
+ if "overlap" in attrs and bool(safeEval(attrs["overlap"])):
+ flag |= flagOverlapSimple
+ if "cubic" in attrs and bool(safeEval(attrs["cubic"])):
+ flag |= flagCubic
+ flags.append(flag)
+ if not hasattr(self, "coordinates"):
+ self.coordinates = coordinates
+ self.flags = flags
+ self.endPtsOfContours = [len(coordinates) - 1]
+ else:
+ self.coordinates.extend(coordinates)
+ self.flags.extend(flags)
+ self.endPtsOfContours.append(len(self.coordinates) - 1)
+ elif name == "component":
+ if self.numberOfContours > 0:
+ raise ttLib.TTLibError("can't mix composites and contours in glyph")
+ self.numberOfContours = -1
+ if not hasattr(self, "components"):
+ self.components = []
+ component = GlyphComponent()
+ self.components.append(component)
+ component.fromXML(name, attrs, content, ttFont)
+ elif name == "varComponent":
+ if self.numberOfContours > 0:
+ raise ttLib.TTLibError("can't mix composites and contours in glyph")
+ self.numberOfContours = -2
+ if not hasattr(self, "components"):
+ self.components = []
+ component = GlyphVarComponent()
+ self.components.append(component)
+ component.fromXML(name, attrs, content, ttFont)
+ elif name == "instructions":
+ self.program = ttProgram.Program()
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ self.program.fromXML(name, attrs, content, ttFont)
+
+ def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1):
+ assert self.isComposite() or self.isVarComposite()
+ nContours = 0
+ nPoints = 0
+ initialMaxComponentDepth = maxComponentDepth
+ for compo in self.components:
+ baseGlyph = glyfTable[compo.glyphName]
+ if baseGlyph.numberOfContours == 0:
+ continue
+ elif baseGlyph.numberOfContours > 0:
+ nP, nC = baseGlyph.getMaxpValues()
+ else:
+ nP, nC, componentDepth = baseGlyph.getCompositeMaxpValues(
+ glyfTable, initialMaxComponentDepth + 1
+ )
+ maxComponentDepth = max(maxComponentDepth, componentDepth)
+ nPoints = nPoints + nP
+ nContours = nContours + nC
+ return CompositeMaxpValues(nPoints, nContours, maxComponentDepth)
+
+ def getMaxpValues(self):
+ assert self.numberOfContours > 0
+ return len(self.coordinates), len(self.endPtsOfContours)
+
+ def decompileComponents(self, data, glyfTable):
+ self.components = []
+ more = 1
+ haveInstructions = 0
+ while more:
+ component = GlyphComponent()
+ more, haveInstr, data = component.decompile(data, glyfTable)
+ haveInstructions = haveInstructions | haveInstr
+ self.components.append(component)
+ if haveInstructions:
+ (numInstructions,) = struct.unpack(">h", data[:2])
+ data = data[2:]
+ self.program = ttProgram.Program()
+ self.program.fromBytecode(data[:numInstructions])
+ data = data[numInstructions:]
+ if len(data) >= 4:
+ log.warning(
+ "too much glyph data at the end of composite glyph: %d excess bytes",
+ len(data),
+ )
+
+ def decompileVarComponents(self, data, glyfTable):
+ self.components = []
+ while len(data) >= GlyphVarComponent.MIN_SIZE:
+ component = GlyphVarComponent()
+ data = component.decompile(data, glyfTable)
+ self.components.append(component)
+
+ def decompileCoordinates(self, data):
+ endPtsOfContours = array.array("H")
+ endPtsOfContours.frombytes(data[: 2 * self.numberOfContours])
+ if sys.byteorder != "big":
+ endPtsOfContours.byteswap()
+ self.endPtsOfContours = endPtsOfContours.tolist()
+
+ pos = 2 * self.numberOfContours
+ (instructionLength,) = struct.unpack(">h", data[pos : pos + 2])
+ self.program = ttProgram.Program()
+ self.program.fromBytecode(data[pos + 2 : pos + 2 + instructionLength])
+ pos += 2 + instructionLength
+ nCoordinates = self.endPtsOfContours[-1] + 1
+ flags, xCoordinates, yCoordinates = self.decompileCoordinatesRaw(
+ nCoordinates, data, pos
+ )
+
+ # fill in repetitions and apply signs
+ self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates)
+ xIndex = 0
+ yIndex = 0
+ for i in range(nCoordinates):
+ flag = flags[i]
+ # x coordinate
+ if flag & flagXShort:
+ if flag & flagXsame:
+ x = xCoordinates[xIndex]
+ else:
+ x = -xCoordinates[xIndex]
+ xIndex = xIndex + 1
+ elif flag & flagXsame:
+ x = 0
+ else:
+ x = xCoordinates[xIndex]
+ xIndex = xIndex + 1
+ # y coordinate
+ if flag & flagYShort:
+ if flag & flagYsame:
+ y = yCoordinates[yIndex]
+ else:
+ y = -yCoordinates[yIndex]
+ yIndex = yIndex + 1
+ elif flag & flagYsame:
+ y = 0
+ else:
+ y = yCoordinates[yIndex]
+ yIndex = yIndex + 1
+ coordinates[i] = (x, y)
+ assert xIndex == len(xCoordinates)
+ assert yIndex == len(yCoordinates)
+ coordinates.relativeToAbsolute()
+ # discard all flags except "keepFlags"
+ for i in range(len(flags)):
+ flags[i] &= keepFlags
+ self.flags = flags
+
+ def decompileCoordinatesRaw(self, nCoordinates, data, pos=0):
+ # unpack flags and prepare unpacking of coordinates
+ flags = bytearray(nCoordinates)
+ # Warning: deep Python trickery going on. We use the struct module to unpack
+ # the coordinates. We build a format string based on the flags, so we can
+ # unpack the coordinates in one struct.unpack() call.
+ xFormat = ">" # big endian
+ yFormat = ">" # big endian
+ j = 0
+ while True:
+ flag = data[pos]
+ pos += 1
+ repeat = 1
+ if flag & flagRepeat:
+ repeat = data[pos] + 1
+ pos += 1
+ for k in range(repeat):
+ if flag & flagXShort:
+ xFormat = xFormat + "B"
+ elif not (flag & flagXsame):
+ xFormat = xFormat + "h"
+ if flag & flagYShort:
+ yFormat = yFormat + "B"
+ elif not (flag & flagYsame):
+ yFormat = yFormat + "h"
+ flags[j] = flag
+ j = j + 1
+ if j >= nCoordinates:
+ break
+ assert j == nCoordinates, "bad glyph flags"
+ # unpack raw coordinates, krrrrrr-tching!
+ xDataLen = struct.calcsize(xFormat)
+ yDataLen = struct.calcsize(yFormat)
+ if len(data) - pos - (xDataLen + yDataLen) >= 4:
+ log.warning(
+ "too much glyph data: %d excess bytes",
+ len(data) - pos - (xDataLen + yDataLen),
+ )
+ xCoordinates = struct.unpack(xFormat, data[pos : pos + xDataLen])
+ yCoordinates = struct.unpack(
+ yFormat, data[pos + xDataLen : pos + xDataLen + yDataLen]
+ )
+ return flags, xCoordinates, yCoordinates
+
+ def compileComponents(self, glyfTable):
+ data = b""
+ lastcomponent = len(self.components) - 1
+ more = 1
+ haveInstructions = 0
+ for i in range(len(self.components)):
+ if i == lastcomponent:
+ haveInstructions = hasattr(self, "program")
+ more = 0
+ compo = self.components[i]
+ data = data + compo.compile(more, haveInstructions, glyfTable)
+ if haveInstructions:
+ instructions = self.program.getBytecode()
+ data = data + struct.pack(">h", len(instructions)) + instructions
+ return data
+
+ def compileVarComponents(self, glyfTable):
+ return b"".join(c.compile(glyfTable) for c in self.components)
+
+ def compileCoordinates(self):
+ assert len(self.coordinates) == len(self.flags)
+ data = []
+ endPtsOfContours = array.array("H", self.endPtsOfContours)
+ if sys.byteorder != "big":
+ endPtsOfContours.byteswap()
+ data.append(endPtsOfContours.tobytes())
+ instructions = self.program.getBytecode()
+ data.append(struct.pack(">h", len(instructions)))
+ data.append(instructions)
+
+ deltas = self.coordinates.copy()
+ deltas.toInt()
+ deltas.absoluteToRelative()
+
+ # TODO(behdad): Add a configuration option for this?
+ deltas = self.compileDeltasGreedy(self.flags, deltas)
+ # deltas = self.compileDeltasOptimal(self.flags, deltas)
+
+ data.extend(deltas)
+ return b"".join(data)
+
+ def compileDeltasGreedy(self, flags, deltas):
+ # Implements greedy algorithm for packing coordinate deltas:
+ # uses shortest representation one coordinate at a time.
+ compressedFlags = bytearray()
+ compressedXs = bytearray()
+ compressedYs = bytearray()
+ lastflag = None
+ repeat = 0
+ for flag, (x, y) in zip(flags, deltas):
+ # Oh, the horrors of TrueType
+ # do x
+ if x == 0:
+ flag = flag | flagXsame
+ elif -255 <= x <= 255:
+ flag = flag | flagXShort
+ if x > 0:
+ flag = flag | flagXsame
+ else:
+ x = -x
+ compressedXs.append(x)
+ else:
+ compressedXs.extend(struct.pack(">h", x))
+ # do y
+ if y == 0:
+ flag = flag | flagYsame
+ elif -255 <= y <= 255:
+ flag = flag | flagYShort
+ if y > 0:
+ flag = flag | flagYsame
+ else:
+ y = -y
+ compressedYs.append(y)
+ else:
+ compressedYs.extend(struct.pack(">h", y))
+ # handle repeating flags
+ if flag == lastflag and repeat != 255:
+ repeat = repeat + 1
+ if repeat == 1:
+ compressedFlags.append(flag)
+ else:
+ compressedFlags[-2] = flag | flagRepeat
+ compressedFlags[-1] = repeat
+ else:
+ repeat = 0
+ compressedFlags.append(flag)
+ lastflag = flag
+ return (compressedFlags, compressedXs, compressedYs)
+
+ def compileDeltasOptimal(self, flags, deltas):
+        # Implements an optimal, dynamic-programming algorithm for packing coordinate
+        # deltas. The savings are negligible :(.
+ candidates = []
+ bestTuple = None
+ bestCost = 0
+ repeat = 0
+ for flag, (x, y) in zip(flags, deltas):
+ # Oh, the horrors of TrueType
+ flag, coordBytes = flagBest(x, y, flag)
+ bestCost += 1 + coordBytes
+ newCandidates = [
+ (bestCost, bestTuple, flag, coordBytes),
+ (bestCost + 1, bestTuple, (flag | flagRepeat), coordBytes),
+ ]
+ for lastCost, lastTuple, lastFlag, coordBytes in candidates:
+ if (
+ lastCost + coordBytes <= bestCost + 1
+ and (lastFlag & flagRepeat)
+ and (lastFlag < 0xFF00)
+ and flagSupports(lastFlag, flag)
+ ):
+ if (lastFlag & 0xFF) == (
+ flag | flagRepeat
+ ) and lastCost == bestCost + 1:
+ continue
+ newCandidates.append(
+ (lastCost + coordBytes, lastTuple, lastFlag + 256, coordBytes)
+ )
+ candidates = newCandidates
+ bestTuple = min(candidates, key=lambda t: t[0])
+ bestCost = bestTuple[0]
+
+ flags = []
+ while bestTuple:
+ cost, bestTuple, flag, coordBytes = bestTuple
+ flags.append(flag)
+ flags.reverse()
+
+ compressedFlags = bytearray()
+ compressedXs = bytearray()
+ compressedYs = bytearray()
+ coords = iter(deltas)
+ ff = []
+ for flag in flags:
+ repeatCount, flag = flag >> 8, flag & 0xFF
+ compressedFlags.append(flag)
+ if flag & flagRepeat:
+ assert repeatCount > 0
+ compressedFlags.append(repeatCount)
+ else:
+ assert repeatCount == 0
+ for i in range(1 + repeatCount):
+ x, y = next(coords)
+ flagEncodeCoords(flag, x, y, compressedXs, compressedYs)
+ ff.append(flag)
+ try:
+ next(coords)
+ raise Exception("internal error")
+ except StopIteration:
+ pass
+
+ return (compressedFlags, compressedXs, compressedYs)
+
+ def recalcBounds(self, glyfTable, *, boundsDone=None):
+ """Recalculates the bounds of the glyph.
+
+ Each glyph object stores its bounding box in the
+ ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
+        recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` table
+        must be provided to resolve component bounds.
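+
+        A usage sketch (the printed bounds depend on the font)::
+
+            >> glyph = font["glyf"]["A"]
+            >> glyph.recalcBounds(font["glyf"])
+            >> glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax
+            (19, 0, 633, 716)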
+ """
+ if self.isComposite() and self.tryRecalcBoundsComposite(
+ glyfTable, boundsDone=boundsDone
+ ):
+ return
+ try:
+ coords, endPts, flags = self.getCoordinates(glyfTable)
+ self.xMin, self.yMin, self.xMax, self.yMax = coords.calcIntBounds()
+ except NotImplementedError:
+ pass
+
+ def tryRecalcBoundsComposite(self, glyfTable, *, boundsDone=None):
+ """Try recalculating the bounds of a composite glyph that has
+ certain constrained properties. Namely, none of the components
+ have a transform other than an integer translate, and none
+ uses the anchor points.
+
+ Each glyph object stores its bounding box in the
+ ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
+        recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` table
+        must be provided to resolve component bounds.
+
+ Return True if bounds were calculated, False otherwise.
+ """
+ for compo in self.components:
+ if hasattr(compo, "firstPt") or hasattr(compo, "transform"):
+ return False
+ if not float(compo.x).is_integer() or not float(compo.y).is_integer():
+ return False
+
+ # All components are untransformed and have an integer x/y translate
+ bounds = None
+ for compo in self.components:
+ glyphName = compo.glyphName
+ g = glyfTable[glyphName]
+
+ if boundsDone is None or glyphName not in boundsDone:
+ g.recalcBounds(glyfTable, boundsDone=boundsDone)
+ if boundsDone is not None:
+ boundsDone.add(glyphName)
+
+ x, y = compo.x, compo.y
+ bounds = updateBounds(bounds, (g.xMin + x, g.yMin + y))
+ bounds = updateBounds(bounds, (g.xMax + x, g.yMax + y))
+
+ if bounds is None:
+ bounds = (0, 0, 0, 0)
+ self.xMin, self.yMin, self.xMax, self.yMax = bounds
+ return True
+
+ def isComposite(self):
+ """Test whether a glyph has components"""
+ if hasattr(self, "data"):
+ return struct.unpack(">h", self.data[:2])[0] == -1 if self.data else False
+ else:
+ return self.numberOfContours == -1
+
+ def isVarComposite(self):
+ """Test whether a glyph has variable components"""
+ if hasattr(self, "data"):
+ return struct.unpack(">h", self.data[:2])[0] == -2 if self.data else False
+ else:
+ return self.numberOfContours == -2
+
+ def getCoordinates(self, glyfTable):
+ """Return the coordinates, end points and flags
+
+ This method returns three values: A :py:class:`GlyphCoordinates` object,
+ a list of the indexes of the final points of each contour (allowing you
+ to split up the coordinates list into contours) and a list of flags.
+
+ On simple glyphs, this method returns information from the glyph's own
+ contours; on composite glyphs, it "flattens" all components recursively
+ to return a list of coordinates representing all the components involved
+ in the glyph.
+
+ To interpret the flags for each point, see the "Simple Glyph Flags"
+ section of the `glyf table specification <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#simple-glyph-description>`.
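+
+        A usage sketch (assumes "A" is not an empty glyph)::
+
+            >> coords, endPts, flags = font["glyf"]["A"].getCoordinates(font["glyf"])
+            >> len(coords) == len(flags)
+            True
+            >> endPts[-1] == len(coords) - 1
+            True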
+ """
+
+ if self.numberOfContours > 0:
+ return self.coordinates, self.endPtsOfContours, self.flags
+ elif self.isComposite():
+ # it's a composite
+ allCoords = GlyphCoordinates()
+ allFlags = bytearray()
+ allEndPts = []
+ for compo in self.components:
+ g = glyfTable[compo.glyphName]
+ try:
+ coordinates, endPts, flags = g.getCoordinates(glyfTable)
+ except RecursionError:
+ raise ttLib.TTLibError(
+ "glyph '%s' contains a recursive component reference"
+ % compo.glyphName
+ )
+ coordinates = GlyphCoordinates(coordinates)
+ if hasattr(compo, "firstPt"):
+ # component uses two reference points: we apply the transform _before_
+ # computing the offset between the points
+ if hasattr(compo, "transform"):
+ coordinates.transform(compo.transform)
+ x1, y1 = allCoords[compo.firstPt]
+ x2, y2 = coordinates[compo.secondPt]
+ move = x1 - x2, y1 - y2
+ coordinates.translate(move)
+ else:
+ # component uses XY offsets
+ move = compo.x, compo.y
+ if not hasattr(compo, "transform"):
+ coordinates.translate(move)
+ else:
+ apple_way = compo.flags & SCALED_COMPONENT_OFFSET
+ ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET
+ assert not (apple_way and ms_way)
+ if not (apple_way or ms_way):
+ scale_component_offset = (
+ SCALE_COMPONENT_OFFSET_DEFAULT # see top of this file
+ )
+ else:
+ scale_component_offset = apple_way
+ if scale_component_offset:
+ # the Apple way: first move, then scale (ie. scale the component offset)
+ coordinates.translate(move)
+ coordinates.transform(compo.transform)
+ else:
+ # the MS way: first scale, then move
+ coordinates.transform(compo.transform)
+ coordinates.translate(move)
+ offset = len(allCoords)
+ allEndPts.extend(e + offset for e in endPts)
+ allCoords.extend(coordinates)
+ allFlags.extend(flags)
+ return allCoords, allEndPts, allFlags
+ elif self.isVarComposite():
+ raise NotImplementedError("use TTGlyphSet to draw VarComposite glyphs")
+ else:
+ return GlyphCoordinates(), [], bytearray()
+
+ def getComponentNames(self, glyfTable):
+ """Returns a list of names of component glyphs used in this glyph
+
+ This method can be used on simple glyphs (in which case it returns an
+ empty list) or composite glyphs.
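+
+        A usage sketch (the composite glyph and its component names are
+        illustrative)::
+
+            >> font["glyf"]["Aacute"].getComponentNames(font["glyf"])
+            ['A', 'acutecomb']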
+ """
+ if hasattr(self, "data") and self.isVarComposite():
+ # TODO(VarComposite) Add implementation without expanding glyph
+ self.expand(glyfTable)
+
+ if not hasattr(self, "data"):
+ if self.isComposite() or self.isVarComposite():
+ return [c.glyphName for c in self.components]
+ else:
+ return []
+
+ # Extract components without expanding glyph
+
+ if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
+ return [] # Not composite
+
+ data = self.data
+ i = 10
+ components = []
+ more = 1
+ while more:
+ flags, glyphID = struct.unpack(">HH", data[i : i + 4])
+ i += 4
+ flags = int(flags)
+ components.append(glyfTable.getGlyphName(int(glyphID)))
+
+ if flags & ARG_1_AND_2_ARE_WORDS:
+ i += 4
+ else:
+ i += 2
+ if flags & WE_HAVE_A_SCALE:
+ i += 2
+ elif flags & WE_HAVE_AN_X_AND_Y_SCALE:
+ i += 4
+ elif flags & WE_HAVE_A_TWO_BY_TWO:
+ i += 8
+ more = flags & MORE_COMPONENTS
+
+ return components
+
+ def trim(self, remove_hinting=False):
+ """Remove padding and, if requested, hinting, from a glyph.
+ This works on both expanded and compacted glyphs, without
+ expanding it."""
+ if not hasattr(self, "data"):
+ if remove_hinting:
+ if self.isComposite():
+ if hasattr(self, "program"):
+ del self.program
+ elif self.isVarComposite():
+ pass # Doesn't have hinting
+ else:
+ self.program = ttProgram.Program()
+ self.program.fromBytecode([])
+ # No padding to trim.
+ return
+ if not self.data:
+ return
+ numContours = struct.unpack(">h", self.data[:2])[0]
+ data = bytearray(self.data)
+ i = 10
+ if numContours >= 0:
+ i += 2 * numContours # endPtsOfContours
+ nCoordinates = ((data[i - 2] << 8) | data[i - 1]) + 1
+ instructionLen = (data[i] << 8) | data[i + 1]
+ if remove_hinting:
+ # Zero instruction length
+ data[i] = data[i + 1] = 0
+ i += 2
+ if instructionLen:
+ # Splice it out
+ data = data[:i] + data[i + instructionLen :]
+ instructionLen = 0
+ else:
+ i += 2 + instructionLen
+
+ coordBytes = 0
+ j = 0
+ while True:
+ flag = data[i]
+ i = i + 1
+ repeat = 1
+ if flag & flagRepeat:
+ repeat = data[i] + 1
+ i = i + 1
+ xBytes = yBytes = 0
+ if flag & flagXShort:
+ xBytes = 1
+ elif not (flag & flagXsame):
+ xBytes = 2
+ if flag & flagYShort:
+ yBytes = 1
+ elif not (flag & flagYsame):
+ yBytes = 2
+ coordBytes += (xBytes + yBytes) * repeat
+ j += repeat
+ if j >= nCoordinates:
+ break
+ assert j == nCoordinates, "bad glyph flags"
+ i += coordBytes
+ # Remove padding
+ data = data[:i]
+ elif self.isComposite():
+ more = 1
+ we_have_instructions = False
+ while more:
+ flags = (data[i] << 8) | data[i + 1]
+ if remove_hinting:
+ flags &= ~WE_HAVE_INSTRUCTIONS
+ if flags & WE_HAVE_INSTRUCTIONS:
+ we_have_instructions = True
+ data[i + 0] = flags >> 8
+ data[i + 1] = flags & 0xFF
+ i += 4
+ flags = int(flags)
+
+ if flags & ARG_1_AND_2_ARE_WORDS:
+ i += 4
+ else:
+ i += 2
+ if flags & WE_HAVE_A_SCALE:
+ i += 2
+ elif flags & WE_HAVE_AN_X_AND_Y_SCALE:
+ i += 4
+ elif flags & WE_HAVE_A_TWO_BY_TWO:
+ i += 8
+ more = flags & MORE_COMPONENTS
+ if we_have_instructions:
+ instructionLen = (data[i] << 8) | data[i + 1]
+ i += 2 + instructionLen
+ # Remove padding
+ data = data[:i]
+ elif self.isVarComposite():
+ i = 0
+ MIN_SIZE = GlyphVarComponent.MIN_SIZE
+ while len(data[i : i + MIN_SIZE]) >= MIN_SIZE:
+ size = GlyphVarComponent.getSize(data[i : i + MIN_SIZE])
+ i += size
+ data = data[:i]
+
+ self.data = data
+
+ def removeHinting(self):
+ """Removes TrueType hinting instructions from the glyph."""
+ self.trim(remove_hinting=True)
+
+ def draw(self, pen, glyfTable, offset=0):
+ """Draws the glyph using the supplied pen object.
+
+ Arguments:
+ pen: An object conforming to the pen protocol.
+ glyfTable: A :py:class:`table__g_l_y_f` object, to resolve components.
+ offset (int): A horizontal offset. If provided, all coordinates are
+ translated by this offset.
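+
+        A usage sketch with a recording pen (assumes "A" is a simple glyph
+        with outlines)::
+
+            >> from fontTools.pens.recordingPen import RecordingPen
+            >> pen = RecordingPen()
+            >> font["glyf"]["A"].draw(pen, font["glyf"])
+            >> pen.value[0][0]
+            'moveTo'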
+ """
+
+ if self.isComposite():
+ for component in self.components:
+ glyphName, transform = component.getComponentInfo()
+ pen.addComponent(glyphName, transform)
+ return
+
+ coordinates, endPts, flags = self.getCoordinates(glyfTable)
+ if offset:
+ coordinates = coordinates.copy()
+ coordinates.translate((offset, 0))
+ start = 0
+ maybeInt = lambda v: int(v) if v == int(v) else v
+ for end in endPts:
+ end = end + 1
+ contour = coordinates[start:end]
+ cFlags = [flagOnCurve & f for f in flags[start:end]]
+ cuFlags = [flagCubic & f for f in flags[start:end]]
+ start = end
+ if 1 not in cFlags:
+ assert all(cuFlags) or not any(cuFlags)
+ cubic = all(cuFlags)
+ if cubic:
+ count = len(contour)
+ assert count % 2 == 0, "Odd number of cubic off-curves undefined"
+ l = contour[-1]
+ f = contour[0]
+ p0 = (maybeInt((l[0] + f[0]) * 0.5), maybeInt((l[1] + f[1]) * 0.5))
+ pen.moveTo(p0)
+ for i in range(0, count, 2):
+ p1 = contour[i]
+ p2 = contour[i + 1]
+ p4 = contour[i + 2 if i + 2 < count else 0]
+ p3 = (
+ maybeInt((p2[0] + p4[0]) * 0.5),
+ maybeInt((p2[1] + p4[1]) * 0.5),
+ )
+ pen.curveTo(p1, p2, p3)
+ else:
+ # There is not a single on-curve point on the curve,
+ # use pen.qCurveTo's special case by specifying None
+ # as the on-curve point.
+ contour.append(None)
+ pen.qCurveTo(*contour)
+ else:
+ # Shuffle the points so that the contour is guaranteed
+ # to *end* in an on-curve point, which we'll use for
+ # the moveTo.
+ firstOnCurve = cFlags.index(1) + 1
+ contour = contour[firstOnCurve:] + contour[:firstOnCurve]
+ cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve]
+ cuFlags = cuFlags[firstOnCurve:] + cuFlags[:firstOnCurve]
+ pen.moveTo(contour[-1])
+ while contour:
+ nextOnCurve = cFlags.index(1) + 1
+ if nextOnCurve == 1:
+ # Skip a final lineTo(), as it is implied by
+ # pen.closePath()
+ if len(contour) > 1:
+ pen.lineTo(contour[0])
+ else:
+ cubicFlags = [f for f in cuFlags[: nextOnCurve - 1]]
+ assert all(cubicFlags) or not any(cubicFlags)
+ cubic = any(cubicFlags)
+ if cubic:
+ assert all(
+ cubicFlags
+ ), "Mixed cubic and quadratic segment undefined"
+
+ count = nextOnCurve
+ assert (
+ count >= 3
+ ), "At least two cubic off-curve points required"
+ assert (
+ count - 1
+ ) % 2 == 0, "Odd number of cubic off-curves undefined"
+ for i in range(0, count - 3, 2):
+ p1 = contour[i]
+ p2 = contour[i + 1]
+ p4 = contour[i + 2]
+ p3 = (
+ maybeInt((p2[0] + p4[0]) * 0.5),
+ maybeInt((p2[1] + p4[1]) * 0.5),
+ )
+ lastOnCurve = p3
+ pen.curveTo(p1, p2, p3)
+ pen.curveTo(*contour[count - 3 : count])
+ else:
+ pen.qCurveTo(*contour[:nextOnCurve])
+ contour = contour[nextOnCurve:]
+ cFlags = cFlags[nextOnCurve:]
+ cuFlags = cuFlags[nextOnCurve:]
+ pen.closePath()
+
+ def drawPoints(self, pen, glyfTable, offset=0):
+ """Draw the glyph using the supplied pointPen. As opposed to Glyph.draw(),
+ this will not change the point indices.
+ """
+
+ if self.isComposite():
+ for component in self.components:
+ glyphName, transform = component.getComponentInfo()
+ pen.addComponent(glyphName, transform)
+ return
+
+ coordinates, endPts, flags = self.getCoordinates(glyfTable)
+ if offset:
+ coordinates = coordinates.copy()
+ coordinates.translate((offset, 0))
+ start = 0
+ for end in endPts:
+ end = end + 1
+ contour = coordinates[start:end]
+ cFlags = flags[start:end]
+ start = end
+ pen.beginPath()
+ # Start with the appropriate segment type based on the final segment
+
+ if cFlags[-1] & flagOnCurve:
+ segmentType = "line"
+ elif cFlags[-1] & flagCubic:
+ segmentType = "curve"
+ else:
+ segmentType = "qcurve"
+ for i, pt in enumerate(contour):
+ if cFlags[i] & flagOnCurve:
+ pen.addPoint(pt, segmentType=segmentType)
+ segmentType = "line"
+ else:
+ pen.addPoint(pt)
+ segmentType = "curve" if cFlags[i] & flagCubic else "qcurve"
+ pen.endPath()
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+
+# Vector.__round__ uses the built-in (Banker's) `round` but we want
+# to use otRound below
+_roundv = partial(Vector.__round__, round=otRound)
+
+
+def _is_mid_point(p0: tuple, p1: tuple, p2: tuple) -> bool:
+ # True if p1 is in the middle of p0 and p2, either before or after rounding
+ p0 = Vector(p0)
+ p1 = Vector(p1)
+ p2 = Vector(p2)
+ return ((p0 + p2) * 0.5).isclose(p1) or _roundv(p0) + _roundv(p2) == _roundv(p1) * 2
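+
+# A couple of concrete checks (illustrative): _is_mid_point((0, 0), (5, 5), (10, 10))
+# is True, while _is_mid_point((0, 0), (4, 5), (10, 10)) is False.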
+
+
+def dropImpliedOnCurvePoints(*interpolatable_glyphs: Glyph) -> Set[int]:
+ """Drop impliable on-curve points from the (simple) glyph or glyphs.
+
+ In TrueType glyf outlines, on-curve points can be implied when they are located at
+ the midpoint of the line connecting two consecutive off-curve points.
+
+ If more than one glyph is passed, the glyphs are assumed to be interpolatable
+ masters of the same glyph, and only the on-curve points that are impliable in
+ all of them are actually implied (dropped).
+ Composite and empty glyphs are skipped; only simple glyphs with one or more
+ contours are considered.
+ The input glyph(s) are modified in place.
+
+ Args:
+ interpolatable_glyphs: The glyph or glyphs to modify in-place.
+
+ Returns:
+ The set of point indices that were dropped if any.
+
+ Raises:
+ ValueError: if the simple glyphs are not in fact interpolatable because they
+ differ in point flags or in the number of contours.
+
+ Reference:
+ https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
+ """
+ staticAttributes = SimpleNamespace(
+ numberOfContours=None, flags=None, endPtsOfContours=None
+ )
+ drop = None
+ simple_glyphs = []
+ for i, glyph in enumerate(interpolatable_glyphs):
+ if glyph.numberOfContours < 1:
+ # ignore composite or empty glyphs
+ continue
+
+ for attr in staticAttributes.__dict__:
+ expected = getattr(staticAttributes, attr)
+ found = getattr(glyph, attr)
+ if expected is None:
+ setattr(staticAttributes, attr, found)
+ elif expected != found:
+ raise ValueError(
+ f"Incompatible {attr} for glyph at master index {i}: "
+ f"expected {expected}, found {found}"
+ )
+
+ may_drop = set()
+ start = 0
+ coords = glyph.coordinates
+ flags = staticAttributes.flags
+ endPtsOfContours = staticAttributes.endPtsOfContours
+ for last in endPtsOfContours:
+ for i in range(start, last + 1):
+ if not (flags[i] & flagOnCurve):
+ continue
+ prv = i - 1 if i > start else last
+ nxt = i + 1 if i < last else start
+ if (flags[prv] & flagOnCurve) or flags[prv] != flags[nxt]:
+ continue
+ # we may drop the ith on-curve if halfway between previous/next off-curves
+ if not _is_mid_point(coords[prv], coords[i], coords[nxt]):
+ continue
+
+ may_drop.add(i)
+ start = last + 1
+ # we only want to drop a point if ALL interpolatable glyphs imply the same on-curves
+ if drop is None:
+ drop = may_drop
+ else:
+ drop.intersection_update(may_drop)
+
+ simple_glyphs.append(glyph)
+
+ if drop:
+ # Do the actual dropping
+ flags = staticAttributes.flags
+ assert flags is not None
+ newFlags = array.array(
+ "B", (flags[i] for i in range(len(flags)) if i not in drop)
+ )
+
+ endPts = staticAttributes.endPtsOfContours
+ assert endPts is not None
+ newEndPts = []
+ i = 0
+ delta = 0
+ for d in sorted(drop):
+ while d > endPts[i]:
+ newEndPts.append(endPts[i] - delta)
+ i += 1
+ delta += 1
+ while i < len(endPts):
+ newEndPts.append(endPts[i] - delta)
+ i += 1
+
+ for glyph in simple_glyphs:
+ coords = glyph.coordinates
+ glyph.coordinates = GlyphCoordinates(
+ coords[i] for i in range(len(coords)) if i not in drop
+ )
+ glyph.flags = newFlags
+ glyph.endPtsOfContours = newEndPts
+
+ return drop if drop is not None else set()
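+
+
+# Example (illustrative sketch, not part of this change): dropping implied
+# on-curve points from the same glyph in two hypothetical interpolatable
+# masters, so that both stay point-compatible.
+#
+#     >>> from fontTools.ttLib import TTFont
+#     >>> thin = TTFont("MyFont-Thin.ttf")["glyf"]
+#     >>> bold = TTFont("MyFont-Bold.ttf")["glyf"]
+#     >>> dropped = dropImpliedOnCurvePoints(thin["o"], bold["o"])
+#     >>> sorted(dropped)  # indices removed from both masters (possibly empty)
+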
-CompositeMaxpValues = namedtuple('CompositeMaxpValues', ['nPoints', 'nContours', 'maxComponentDepth'])
+class GlyphComponent(object):
+ """Represents a component within a composite glyph.
+
+ The component is represented internally with four attributes: ``glyphName``,
+ ``x``, ``y`` and ``transform``. If there is no "two-by-two" matrix (i.e.
+ no scaling, reflection, or rotation; only translation), the ``transform``
+ attribute is not present.
+ """
+
+ # The above documentation is not *completely* true, but is *true enough* because
+ # the rare firstPt/secondPt attributes are not fully supported and nobody seems to
+ # mind - see below.
+
+ def __init__(self):
+ pass
+
+ def getComponentInfo(self):
+ """Return information about the component
+
+ This method returns a tuple of two values: the glyph name of the component's
+ base glyph, and a transformation matrix. As opposed to accessing the attributes
+ directly, ``getComponentInfo`` always returns a six-element tuple of the
+ component's transformation matrix, even when the two-by-two ``.transform``
+ matrix is not present.
+ """
+ # XXX Ignoring self.firstPt & self.secondPt for now: I need to implement
+ # something equivalent in fontTools.objects.glyph (I'd rather not
+ # convert it to an absolute offset, since it is valuable information).
+ # This method will now raise "AttributeError: x" on glyphs that use
+ # this TT feature.
+ if hasattr(self, "transform"):
+ [[xx, xy], [yx, yy]] = self.transform
+ trans = (xx, xy, yx, yy, self.x, self.y)
+ else:
+ trans = (1, 0, 0, 1, self.x, self.y)
+ return self.glyphName, trans
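+
+ # Example (illustrative, not part of this change): a plain offset-only
+ # component; the glyph name is hypothetical.
+ #
+ #     >>> c = GlyphComponent()
+ #     >>> c.glyphName, c.x, c.y = "acutecomb", 120, 0
+ #     >>> c.getComponentInfo()
+ #     ('acutecomb', (1, 0, 0, 1, 120, 0))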
+
+ def decompile(self, data, glyfTable):
+ flags, glyphID = struct.unpack(">HH", data[:4])
+ self.flags = int(flags)
+ glyphID = int(glyphID)
+ self.glyphName = glyfTable.getGlyphName(int(glyphID))
+ data = data[4:]
+
+ if self.flags & ARG_1_AND_2_ARE_WORDS:
+ if self.flags & ARGS_ARE_XY_VALUES:
+ self.x, self.y = struct.unpack(">hh", data[:4])
+ else:
+ x, y = struct.unpack(">HH", data[:4])
+ self.firstPt, self.secondPt = int(x), int(y)
+ data = data[4:]
+ else:
+ if self.flags & ARGS_ARE_XY_VALUES:
+ self.x, self.y = struct.unpack(">bb", data[:2])
+ else:
+ x, y = struct.unpack(">BB", data[:2])
+ self.firstPt, self.secondPt = int(x), int(y)
+ data = data[2:]
+
+ if self.flags & WE_HAVE_A_SCALE:
+ (scale,) = struct.unpack(">h", data[:2])
+ self.transform = [
+ [fi2fl(scale, 14), 0],
+ [0, fi2fl(scale, 14)],
+ ] # fixed 2.14
+ data = data[2:]
+ elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE:
+ xscale, yscale = struct.unpack(">hh", data[:4])
+ self.transform = [
+ [fi2fl(xscale, 14), 0],
+ [0, fi2fl(yscale, 14)],
+ ] # fixed 2.14
+ data = data[4:]
+ elif self.flags & WE_HAVE_A_TWO_BY_TWO:
+ (xscale, scale01, scale10, yscale) = struct.unpack(">hhhh", data[:8])
+ self.transform = [
+ [fi2fl(xscale, 14), fi2fl(scale01, 14)],
+ [fi2fl(scale10, 14), fi2fl(yscale, 14)],
+ ] # fixed 2.14
+ data = data[8:]
+ more = self.flags & MORE_COMPONENTS
+ haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS
+ self.flags = self.flags & (
+ ROUND_XY_TO_GRID
+ | USE_MY_METRICS
+ | SCALED_COMPONENT_OFFSET
+ | UNSCALED_COMPONENT_OFFSET
+ | NON_OVERLAPPING
+ | OVERLAP_COMPOUND
+ )
+ return more, haveInstructions, data
+
+ def compile(self, more, haveInstructions, glyfTable):
+ data = b""
+
+ # reset all flags we will calculate ourselves
+ flags = self.flags & (
+ ROUND_XY_TO_GRID
+ | USE_MY_METRICS
+ | SCALED_COMPONENT_OFFSET
+ | UNSCALED_COMPONENT_OFFSET
+ | NON_OVERLAPPING
+ | OVERLAP_COMPOUND
+ )
+ if more:
+ flags = flags | MORE_COMPONENTS
+ if haveInstructions:
+ flags = flags | WE_HAVE_INSTRUCTIONS
+
+ if hasattr(self, "firstPt"):
+ if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255):
+ data = data + struct.pack(">BB", self.firstPt, self.secondPt)
+ else:
+ data = data + struct.pack(">HH", self.firstPt, self.secondPt)
+ flags = flags | ARG_1_AND_2_ARE_WORDS
+ else:
+ x = otRound(self.x)
+ y = otRound(self.y)
+ flags = flags | ARGS_ARE_XY_VALUES
+ if (-128 <= x <= 127) and (-128 <= y <= 127):
+ data = data + struct.pack(">bb", x, y)
+ else:
+ data = data + struct.pack(">hh", x, y)
+ flags = flags | ARG_1_AND_2_ARE_WORDS
+
+ if hasattr(self, "transform"):
+ transform = [[fl2fi(x, 14) for x in row] for row in self.transform]
+ if transform[0][1] or transform[1][0]:
+ flags = flags | WE_HAVE_A_TWO_BY_TWO
+ data = data + struct.pack(
+ ">hhhh",
+ transform[0][0],
+ transform[0][1],
+ transform[1][0],
+ transform[1][1],
+ )
+ elif transform[0][0] != transform[1][1]:
+ flags = flags | WE_HAVE_AN_X_AND_Y_SCALE
+ data = data + struct.pack(">hh", transform[0][0], transform[1][1])
+ else:
+ flags = flags | WE_HAVE_A_SCALE
+ data = data + struct.pack(">h", transform[0][0])
+
+ glyphID = glyfTable.getGlyphID(self.glyphName)
+ return struct.pack(">HH", flags, glyphID) + data
+
+ def toXML(self, writer, ttFont):
+ attrs = [("glyphName", self.glyphName)]
+ if not hasattr(self, "firstPt"):
+ attrs = attrs + [("x", self.x), ("y", self.y)]
+ else:
+ attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)]
+
+ if hasattr(self, "transform"):
+ transform = self.transform
+ if transform[0][1] or transform[1][0]:
+ attrs = attrs + [
+ ("scalex", fl2str(transform[0][0], 14)),
+ ("scale01", fl2str(transform[0][1], 14)),
+ ("scale10", fl2str(transform[1][0], 14)),
+ ("scaley", fl2str(transform[1][1], 14)),
+ ]
+ elif transform[0][0] != transform[1][1]:
+ attrs = attrs + [
+ ("scalex", fl2str(transform[0][0], 14)),
+ ("scaley", fl2str(transform[1][1], 14)),
+ ]
+ else:
+ attrs = attrs + [("scale", fl2str(transform[0][0], 14))]
+ attrs = attrs + [("flags", hex(self.flags))]
+ writer.simpletag("component", attrs)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.glyphName = attrs["glyphName"]
+ if "firstPt" in attrs:
+ self.firstPt = safeEval(attrs["firstPt"])
+ self.secondPt = safeEval(attrs["secondPt"])
+ else:
+ self.x = safeEval(attrs["x"])
+ self.y = safeEval(attrs["y"])
+ if "scale01" in attrs:
+ scalex = str2fl(attrs["scalex"], 14)
+ scale01 = str2fl(attrs["scale01"], 14)
+ scale10 = str2fl(attrs["scale10"], 14)
+ scaley = str2fl(attrs["scaley"], 14)
+ self.transform = [[scalex, scale01], [scale10, scaley]]
+ elif "scalex" in attrs:
+ scalex = str2fl(attrs["scalex"], 14)
+ scaley = str2fl(attrs["scaley"], 14)
+ self.transform = [[scalex, 0], [0, scaley]]
+ elif "scale" in attrs:
+ scale = str2fl(attrs["scale"], 14)
+ self.transform = [[scale, 0], [0, scale]]
+ self.flags = safeEval(attrs["flags"])
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
-class Glyph(object):
- """This class represents an individual TrueType glyph.
-
- TrueType glyph objects come in two flavours: simple and composite. Simple
- glyph objects contain contours, represented via the ``.coordinates``,
- ``.flags``, ``.numberOfContours``, and ``.endPtsOfContours`` attributes;
- composite glyphs contain components, available through the ``.components``
- attributes.
-
- Because the ``.coordinates`` attribute (and other simple glyph attributes mentioned
- above) is only set on simple glyphs and the ``.components`` attribute is only
- set on composite glyphs, it is necessary to use the :py:meth:`isComposite`
- method to test whether a glyph is simple or composite before attempting to
- access its data.
-
- For a composite glyph, the components can also be accessed via array-like access::
-
- >> assert(font["glyf"]["Aacute"].isComposite())
- >> font["glyf"]["Aacute"][0]
- <fontTools.ttLib.tables._g_l_y_f.GlyphComponent at 0x1027b2ee0>
-
- """
-
- def __init__(self, data=b""):
- if not data:
- # empty char
- self.numberOfContours = 0
- return
- self.data = data
-
- def compact(self, glyfTable, recalcBBoxes=True):
- data = self.compile(glyfTable, recalcBBoxes)
- self.__dict__.clear()
- self.data = data
-
- def expand(self, glyfTable):
- if not hasattr(self, "data"):
- # already unpacked
- return
- if not self.data:
- # empty char
- del self.data
- self.numberOfContours = 0
- return
- dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self)
- del self.data
- # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in
- # some glyphs; decompileCoordinates assumes that there's at least
- # one, so short-circuit here.
- if self.numberOfContours == 0:
- return
- if self.isComposite():
- self.decompileComponents(data, glyfTable)
- else:
- self.decompileCoordinates(data)
-
- def compile(self, glyfTable, recalcBBoxes=True):
- if hasattr(self, "data"):
- if recalcBBoxes:
- # must unpack glyph in order to recalculate bounding box
- self.expand(glyfTable)
- else:
- return self.data
- if self.numberOfContours == 0:
- return b''
- if recalcBBoxes:
- self.recalcBounds(glyfTable)
- data = sstruct.pack(glyphHeaderFormat, self)
- if self.isComposite():
- data = data + self.compileComponents(glyfTable)
- else:
- data = data + self.compileCoordinates()
- return data
-
- def toXML(self, writer, ttFont):
- if self.isComposite():
- for compo in self.components:
- compo.toXML(writer, ttFont)
- haveInstructions = hasattr(self, "program")
- else:
- last = 0
- for i in range(self.numberOfContours):
- writer.begintag("contour")
- writer.newline()
- for j in range(last, self.endPtsOfContours[i] + 1):
- attrs = [
- ("x", self.coordinates[j][0]),
- ("y", self.coordinates[j][1]),
- ("on", self.flags[j] & flagOnCurve),
- ]
- if self.flags[j] & flagOverlapSimple:
- # Apple's rasterizer uses flagOverlapSimple in the first contour/first pt to flag glyphs that contain overlapping contours
- attrs.append(("overlap", 1))
- writer.simpletag("pt", attrs)
- writer.newline()
- last = self.endPtsOfContours[i] + 1
- writer.endtag("contour")
- writer.newline()
- haveInstructions = self.numberOfContours > 0
- if haveInstructions:
- if self.program:
- writer.begintag("instructions")
- writer.newline()
- self.program.toXML(writer, ttFont)
- writer.endtag("instructions")
- else:
- writer.simpletag("instructions")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "contour":
- if self.numberOfContours < 0:
- raise ttLib.TTLibError("can't mix composites and contours in glyph")
- self.numberOfContours = self.numberOfContours + 1
- coordinates = GlyphCoordinates()
- flags = bytearray()
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "pt":
- continue # ignore anything but "pt"
- coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"])))
- flag = bool(safeEval(attrs["on"]))
- if "overlap" in attrs and bool(safeEval(attrs["overlap"])):
- flag |= flagOverlapSimple
- flags.append(flag)
- if not hasattr(self, "coordinates"):
- self.coordinates = coordinates
- self.flags = flags
- self.endPtsOfContours = [len(coordinates)-1]
- else:
- self.coordinates.extend (coordinates)
- self.flags.extend(flags)
- self.endPtsOfContours.append(len(self.coordinates)-1)
- elif name == "component":
- if self.numberOfContours > 0:
- raise ttLib.TTLibError("can't mix composites and contours in glyph")
- self.numberOfContours = -1
- if not hasattr(self, "components"):
- self.components = []
- component = GlyphComponent()
- self.components.append(component)
- component.fromXML(name, attrs, content, ttFont)
- elif name == "instructions":
- self.program = ttProgram.Program()
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- self.program.fromXML(name, attrs, content, ttFont)
-
- def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1):
- assert self.isComposite()
- nContours = 0
- nPoints = 0
- initialMaxComponentDepth = maxComponentDepth
- for compo in self.components:
- baseGlyph = glyfTable[compo.glyphName]
- if baseGlyph.numberOfContours == 0:
- continue
- elif baseGlyph.numberOfContours > 0:
- nP, nC = baseGlyph.getMaxpValues()
- else:
- nP, nC, componentDepth = baseGlyph.getCompositeMaxpValues(
- glyfTable, initialMaxComponentDepth + 1)
- maxComponentDepth = max(maxComponentDepth, componentDepth)
- nPoints = nPoints + nP
- nContours = nContours + nC
- return CompositeMaxpValues(nPoints, nContours, maxComponentDepth)
-
- def getMaxpValues(self):
- assert self.numberOfContours > 0
- return len(self.coordinates), len(self.endPtsOfContours)
-
- def decompileComponents(self, data, glyfTable):
- self.components = []
- more = 1
- haveInstructions = 0
- while more:
- component = GlyphComponent()
- more, haveInstr, data = component.decompile(data, glyfTable)
- haveInstructions = haveInstructions | haveInstr
- self.components.append(component)
- if haveInstructions:
- numInstructions, = struct.unpack(">h", data[:2])
- data = data[2:]
- self.program = ttProgram.Program()
- self.program.fromBytecode(data[:numInstructions])
- data = data[numInstructions:]
- if len(data) >= 4:
- log.warning(
- "too much glyph data at the end of composite glyph: %d excess bytes",
- len(data))
-
- def decompileCoordinates(self, data):
- endPtsOfContours = array.array("h")
- endPtsOfContours.frombytes(data[:2*self.numberOfContours])
- if sys.byteorder != "big": endPtsOfContours.byteswap()
- self.endPtsOfContours = endPtsOfContours.tolist()
-
- pos = 2*self.numberOfContours
- instructionLength, = struct.unpack(">h", data[pos:pos+2])
- self.program = ttProgram.Program()
- self.program.fromBytecode(data[pos+2:pos+2+instructionLength])
- pos += 2 + instructionLength
- nCoordinates = self.endPtsOfContours[-1] + 1
- flags, xCoordinates, yCoordinates = \
- self.decompileCoordinatesRaw(nCoordinates, data, pos)
-
- # fill in repetitions and apply signs
- self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates)
- xIndex = 0
- yIndex = 0
- for i in range(nCoordinates):
- flag = flags[i]
- # x coordinate
- if flag & flagXShort:
- if flag & flagXsame:
- x = xCoordinates[xIndex]
- else:
- x = -xCoordinates[xIndex]
- xIndex = xIndex + 1
- elif flag & flagXsame:
- x = 0
- else:
- x = xCoordinates[xIndex]
- xIndex = xIndex + 1
- # y coordinate
- if flag & flagYShort:
- if flag & flagYsame:
- y = yCoordinates[yIndex]
- else:
- y = -yCoordinates[yIndex]
- yIndex = yIndex + 1
- elif flag & flagYsame:
- y = 0
- else:
- y = yCoordinates[yIndex]
- yIndex = yIndex + 1
- coordinates[i] = (x, y)
- assert xIndex == len(xCoordinates)
- assert yIndex == len(yCoordinates)
- coordinates.relativeToAbsolute()
- # discard all flags except "keepFlags"
- for i in range(len(flags)):
- flags[i] &= keepFlags
- self.flags = flags
-
- def decompileCoordinatesRaw(self, nCoordinates, data, pos=0):
- # unpack flags and prepare unpacking of coordinates
- flags = bytearray(nCoordinates)
- # Warning: deep Python trickery going on. We use the struct module to unpack
- # the coordinates. We build a format string based on the flags, so we can
- # unpack the coordinates in one struct.unpack() call.
- xFormat = ">" # big endian
- yFormat = ">" # big endian
- j = 0
- while True:
- flag = data[pos]
- pos += 1
- repeat = 1
- if flag & flagRepeat:
- repeat = data[pos] + 1
- pos += 1
- for k in range(repeat):
- if flag & flagXShort:
- xFormat = xFormat + 'B'
- elif not (flag & flagXsame):
- xFormat = xFormat + 'h'
- if flag & flagYShort:
- yFormat = yFormat + 'B'
- elif not (flag & flagYsame):
- yFormat = yFormat + 'h'
- flags[j] = flag
- j = j + 1
- if j >= nCoordinates:
- break
- assert j == nCoordinates, "bad glyph flags"
- # unpack raw coordinates, krrrrrr-tching!
- xDataLen = struct.calcsize(xFormat)
- yDataLen = struct.calcsize(yFormat)
- if len(data) - pos - (xDataLen + yDataLen) >= 4:
- log.warning(
- "too much glyph data: %d excess bytes", len(data) - pos - (xDataLen + yDataLen))
- xCoordinates = struct.unpack(xFormat, data[pos:pos+xDataLen])
- yCoordinates = struct.unpack(yFormat, data[pos+xDataLen:pos+xDataLen+yDataLen])
- return flags, xCoordinates, yCoordinates
-
- def compileComponents(self, glyfTable):
- data = b""
- lastcomponent = len(self.components) - 1
- more = 1
- haveInstructions = 0
- for i in range(len(self.components)):
- if i == lastcomponent:
- haveInstructions = hasattr(self, "program")
- more = 0
- compo = self.components[i]
- data = data + compo.compile(more, haveInstructions, glyfTable)
- if haveInstructions:
- instructions = self.program.getBytecode()
- data = data + struct.pack(">h", len(instructions)) + instructions
- return data
-
- def compileCoordinates(self):
- assert len(self.coordinates) == len(self.flags)
- data = []
- endPtsOfContours = array.array("h", self.endPtsOfContours)
- if sys.byteorder != "big": endPtsOfContours.byteswap()
- data.append(endPtsOfContours.tobytes())
- instructions = self.program.getBytecode()
- data.append(struct.pack(">h", len(instructions)))
- data.append(instructions)
-
- deltas = self.coordinates.copy()
- deltas.toInt()
- deltas.absoluteToRelative()
-
- # TODO(behdad): Add a configuration option for this?
- deltas = self.compileDeltasGreedy(self.flags, deltas)
- #deltas = self.compileDeltasOptimal(self.flags, deltas)
-
- data.extend(deltas)
- return b''.join(data)
-
- def compileDeltasGreedy(self, flags, deltas):
- # Implements greedy algorithm for packing coordinate deltas:
- # uses shortest representation one coordinate at a time.
- compressedFlags = bytearray()
- compressedXs = bytearray()
- compressedYs = bytearray()
- lastflag = None
- repeat = 0
- for flag,(x,y) in zip(flags, deltas):
- # Oh, the horrors of TrueType
- # do x
- if x == 0:
- flag = flag | flagXsame
- elif -255 <= x <= 255:
- flag = flag | flagXShort
- if x > 0:
- flag = flag | flagXsame
- else:
- x = -x
- compressedXs.append(x)
- else:
- compressedXs.extend(struct.pack('>h', x))
- # do y
- if y == 0:
- flag = flag | flagYsame
- elif -255 <= y <= 255:
- flag = flag | flagYShort
- if y > 0:
- flag = flag | flagYsame
- else:
- y = -y
- compressedYs.append(y)
- else:
- compressedYs.extend(struct.pack('>h', y))
- # handle repeating flags
- if flag == lastflag and repeat != 255:
- repeat = repeat + 1
- if repeat == 1:
- compressedFlags.append(flag)
- else:
- compressedFlags[-2] = flag | flagRepeat
- compressedFlags[-1] = repeat
- else:
- repeat = 0
- compressedFlags.append(flag)
- lastflag = flag
- return (compressedFlags, compressedXs, compressedYs)
-
- def compileDeltasOptimal(self, flags, deltas):
- # Implements optimal, dynaic-programming, algorithm for packing coordinate
- # deltas. The savings are negligible :(.
- candidates = []
- bestTuple = None
- bestCost = 0
- repeat = 0
- for flag,(x,y) in zip(flags, deltas):
- # Oh, the horrors of TrueType
- flag, coordBytes = flagBest(x, y, flag)
- bestCost += 1 + coordBytes
- newCandidates = [(bestCost, bestTuple, flag, coordBytes),
- (bestCost+1, bestTuple, (flag|flagRepeat), coordBytes)]
- for lastCost,lastTuple,lastFlag,coordBytes in candidates:
- if lastCost + coordBytes <= bestCost + 1 and (lastFlag & flagRepeat) and (lastFlag < 0xff00) and flagSupports(lastFlag, flag):
- if (lastFlag & 0xFF) == (flag|flagRepeat) and lastCost == bestCost + 1:
- continue
- newCandidates.append((lastCost + coordBytes, lastTuple, lastFlag+256, coordBytes))
- candidates = newCandidates
- bestTuple = min(candidates, key=lambda t:t[0])
- bestCost = bestTuple[0]
-
- flags = []
- while bestTuple:
- cost, bestTuple, flag, coordBytes = bestTuple
- flags.append(flag)
- flags.reverse()
-
- compressedFlags = bytearray()
- compressedXs = bytearray()
- compressedYs = bytearray()
- coords = iter(deltas)
- ff = []
- for flag in flags:
- repeatCount, flag = flag >> 8, flag & 0xFF
- compressedFlags.append(flag)
- if flag & flagRepeat:
- assert(repeatCount > 0)
- compressedFlags.append(repeatCount)
- else:
- assert(repeatCount == 0)
- for i in range(1 + repeatCount):
- x,y = next(coords)
- flagEncodeCoords(flag, x, y, compressedXs, compressedYs)
- ff.append(flag)
- try:
- next(coords)
- raise Exception("internal error")
- except StopIteration:
- pass
-
- return (compressedFlags, compressedXs, compressedYs)
-
- def recalcBounds(self, glyfTable):
- """Recalculates the bounds of the glyph.
-
- Each glyph object stores its bounding box in the
- ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
- recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds
- must be provided to resolve component bounds.
- """
- coords, endPts, flags = self.getCoordinates(glyfTable)
- self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords)
-
- def isComposite(self):
- """Test whether a glyph has components"""
- if hasattr(self, "data") and self.data:
- return struct.unpack(">h", self.data[:2])[0] == -1
- else:
- return self.numberOfContours == -1
-
- def __getitem__(self, componentIndex):
- if not self.isComposite():
- raise ttLib.TTLibError("can't use glyph as sequence")
- return self.components[componentIndex]
-
- def getCoordinates(self, glyfTable):
- """Return the coordinates, end points and flags
-
- This method returns three values: A :py:class:`GlyphCoordinates` object,
- a list of the indexes of the final points of each contour (allowing you
- to split up the coordinates list into contours) and a list of flags.
-
- On simple glyphs, this method returns information from the glyph's own
- contours; on composite glyphs, it "flattens" all components recursively
- to return a list of coordinates representing all the components involved
- in the glyph.
-
- To interpret the flags for each point, see the "Simple Glyph Flags"
- section of the `glyf table specification <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#simple-glyph-description>`.
- """
-
- if self.numberOfContours > 0:
- return self.coordinates, self.endPtsOfContours, self.flags
- elif self.isComposite():
- # it's a composite
- allCoords = GlyphCoordinates()
- allFlags = bytearray()
- allEndPts = []
- for compo in self.components:
- g = glyfTable[compo.glyphName]
- try:
- coordinates, endPts, flags = g.getCoordinates(glyfTable)
- except RecursionError:
- raise ttLib.TTLibError("glyph '%s' contains a recursive component reference" % compo.glyphName)
- coordinates = GlyphCoordinates(coordinates)
- if hasattr(compo, "firstPt"):
- # component uses two reference points: we apply the transform _before_
- # computing the offset between the points
- if hasattr(compo, "transform"):
- coordinates.transform(compo.transform)
- x1,y1 = allCoords[compo.firstPt]
- x2,y2 = coordinates[compo.secondPt]
- move = x1-x2, y1-y2
- coordinates.translate(move)
- else:
- # component uses XY offsets
- move = compo.x, compo.y
- if not hasattr(compo, "transform"):
- coordinates.translate(move)
- else:
- apple_way = compo.flags & SCALED_COMPONENT_OFFSET
- ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET
- assert not (apple_way and ms_way)
- if not (apple_way or ms_way):
- scale_component_offset = SCALE_COMPONENT_OFFSET_DEFAULT # see top of this file
- else:
- scale_component_offset = apple_way
- if scale_component_offset:
- # the Apple way: first move, then scale (ie. scale the component offset)
- coordinates.translate(move)
- coordinates.transform(compo.transform)
- else:
- # the MS way: first scale, then move
- coordinates.transform(compo.transform)
- coordinates.translate(move)
- offset = len(allCoords)
- allEndPts.extend(e + offset for e in endPts)
- allCoords.extend(coordinates)
- allFlags.extend(flags)
- return allCoords, allEndPts, allFlags
- else:
- return GlyphCoordinates(), [], bytearray()
-
- def getComponentNames(self, glyfTable):
- """Returns a list of names of component glyphs used in this glyph
-
- This method can be used on simple glyphs (in which case it returns an
- empty list) or composite glyphs.
- """
- if not hasattr(self, "data"):
- if self.isComposite():
- return [c.glyphName for c in self.components]
- else:
- return []
-
- # Extract components without expanding glyph
-
- if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
- return [] # Not composite
-
- data = self.data
- i = 10
- components = []
- more = 1
- while more:
- flags, glyphID = struct.unpack(">HH", data[i:i+4])
- i += 4
- flags = int(flags)
- components.append(glyfTable.getGlyphName(int(glyphID)))
-
- if flags & ARG_1_AND_2_ARE_WORDS: i += 4
- else: i += 2
- if flags & WE_HAVE_A_SCALE: i += 2
- elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4
- elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8
- more = flags & MORE_COMPONENTS
-
- return components
-
- def trim(self, remove_hinting=False):
- """ Remove padding and, if requested, hinting, from a glyph.
- This works on both expanded and compacted glyphs, without
- expanding it."""
- if not hasattr(self, "data"):
- if remove_hinting:
- if self.isComposite():
- if hasattr(self, "program"):
- del self.program
- else:
- self.program = ttProgram.Program()
- self.program.fromBytecode([])
- # No padding to trim.
- return
- if not self.data:
- return
- numContours = struct.unpack(">h", self.data[:2])[0]
- data = bytearray(self.data)
- i = 10
- if numContours >= 0:
- i += 2 * numContours # endPtsOfContours
- nCoordinates = ((data[i-2] << 8) | data[i-1]) + 1
- instructionLen = (data[i] << 8) | data[i+1]
- if remove_hinting:
- # Zero instruction length
- data[i] = data [i+1] = 0
- i += 2
- if instructionLen:
- # Splice it out
- data = data[:i] + data[i+instructionLen:]
- instructionLen = 0
- else:
- i += 2 + instructionLen
-
- coordBytes = 0
- j = 0
- while True:
- flag = data[i]
- i = i + 1
- repeat = 1
- if flag & flagRepeat:
- repeat = data[i] + 1
- i = i + 1
- xBytes = yBytes = 0
- if flag & flagXShort:
- xBytes = 1
- elif not (flag & flagXsame):
- xBytes = 2
- if flag & flagYShort:
- yBytes = 1
- elif not (flag & flagYsame):
- yBytes = 2
- coordBytes += (xBytes + yBytes) * repeat
- j += repeat
- if j >= nCoordinates:
- break
- assert j == nCoordinates, "bad glyph flags"
- i += coordBytes
- # Remove padding
- data = data[:i]
- else:
- more = 1
- we_have_instructions = False
- while more:
- flags =(data[i] << 8) | data[i+1]
- if remove_hinting:
- flags &= ~WE_HAVE_INSTRUCTIONS
- if flags & WE_HAVE_INSTRUCTIONS:
- we_have_instructions = True
- data[i+0] = flags >> 8
- data[i+1] = flags & 0xFF
- i += 4
- flags = int(flags)
-
- if flags & ARG_1_AND_2_ARE_WORDS: i += 4
- else: i += 2
- if flags & WE_HAVE_A_SCALE: i += 2
- elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4
- elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8
- more = flags & MORE_COMPONENTS
- if we_have_instructions:
- instructionLen = (data[i] << 8) | data[i+1]
- i += 2 + instructionLen
- # Remove padding
- data = data[:i]
-
- self.data = data
-
- def removeHinting(self):
- """Removes TrueType hinting instructions from the glyph."""
- self.trim (remove_hinting=True)
-
- def draw(self, pen, glyfTable, offset=0):
- """Draws the glyph using the supplied pen object.
-
- Arguments:
- pen: An object conforming to the pen protocol.
- glyfTable: A :py:class:`table__g_l_y_f` object, to resolve components.
- offset (int): A horizontal offset. If provided, all coordinates are
- translated by this offset.
- """
-
- if self.isComposite():
- for component in self.components:
- glyphName, transform = component.getComponentInfo()
- pen.addComponent(glyphName, transform)
- return
-
- coordinates, endPts, flags = self.getCoordinates(glyfTable)
- if offset:
- coordinates = coordinates.copy()
- coordinates.translate((offset, 0))
- start = 0
- for end in endPts:
- end = end + 1
- contour = coordinates[start:end]
- cFlags = [flagOnCurve & f for f in flags[start:end]]
- start = end
- if 1 not in cFlags:
- # There is not a single on-curve point on the curve,
- # use pen.qCurveTo's special case by specifying None
- # as the on-curve point.
- contour.append(None)
- pen.qCurveTo(*contour)
- else:
- # Shuffle the points so that contour the is guaranteed
- # to *end* in an on-curve point, which we'll use for
- # the moveTo.
- firstOnCurve = cFlags.index(1) + 1
- contour = contour[firstOnCurve:] + contour[:firstOnCurve]
- cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve]
- pen.moveTo(contour[-1])
- while contour:
- nextOnCurve = cFlags.index(1) + 1
- if nextOnCurve == 1:
- # Skip a final lineTo(), as it is implied by
- # pen.closePath()
- if len(contour) > 1:
- pen.lineTo(contour[0])
- else:
- pen.qCurveTo(*contour[:nextOnCurve])
- contour = contour[nextOnCurve:]
- cFlags = cFlags[nextOnCurve:]
- pen.closePath()
-
- def drawPoints(self, pen, glyfTable, offset=0):
- """Draw the glyph using the supplied pointPen. As opposed to Glyph.draw(),
- this will not change the point indices.
- """
-
- if self.isComposite():
- for component in self.components:
- glyphName, transform = component.getComponentInfo()
- pen.addComponent(glyphName, transform)
- return
-
- coordinates, endPts, flags = self.getCoordinates(glyfTable)
- if offset:
- coordinates = coordinates.copy()
- coordinates.translate((offset, 0))
- start = 0
- for end in endPts:
- end = end + 1
- contour = coordinates[start:end]
- cFlags = flags[start:end]
- start = end
- pen.beginPath()
- # Start with the appropriate segment type based on the final segment
- segmentType = "line" if cFlags[-1] == 1 else "qcurve"
- for i, pt in enumerate(contour):
- if cFlags[i] & flagOnCurve == 1:
- pen.addPoint(pt, segmentType=segmentType)
- segmentType = "line"
- else:
- pen.addPoint(pt)
- segmentType = "qcurve"
- pen.endPath()
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+#
+# Variable Composite glyphs
+# https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md
+#
+
+
+class VarComponentFlags(IntFlag):
+ USE_MY_METRICS = 0x0001
+ AXIS_INDICES_ARE_SHORT = 0x0002
+ UNIFORM_SCALE = 0x0004
+ HAVE_TRANSLATE_X = 0x0008
+ HAVE_TRANSLATE_Y = 0x0010
+ HAVE_ROTATION = 0x0020
+ HAVE_SCALE_X = 0x0040
+ HAVE_SCALE_Y = 0x0080
+ HAVE_SKEW_X = 0x0100
+ HAVE_SKEW_Y = 0x0200
+ HAVE_TCENTER_X = 0x0400
+ HAVE_TCENTER_Y = 0x0800
+ GID_IS_24BIT = 0x1000
+ AXES_HAVE_VARIATION = 0x2000
+ RESET_UNSPECIFIED_AXES = 0x4000
+
+
+VarComponentTransformMappingValues = namedtuple(
+ "VarComponentTransformMappingValues",
+ ["flag", "fractionalBits", "scale", "defaultValue"],
+)
+
+VAR_COMPONENT_TRANSFORM_MAPPING = {
+ "translateX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TRANSLATE_X, 0, 1, 0
+ ),
+ "translateY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TRANSLATE_Y, 0, 1, 0
+ ),
+ "rotation": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_ROTATION, 12, 180, 0
+ ),
+ "scaleX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SCALE_X, 10, 1, 1
+ ),
+ "scaleY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SCALE_Y, 10, 1, 1
+ ),
+ "skewX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SKEW_X, 12, -180, 0
+ ),
+ "skewY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SKEW_Y, 12, 180, 0
+ ),
+ "tCenterX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TCENTER_X, 0, 1, 0
+ ),
+ "tCenterY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TCENTER_Y, 0, 1, 0
+ ),
+}
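+
+# Each transform component above is stored as a signed 16-bit fixed-point
+# number with `fractionalBits` fractional bits, after dividing the value by
+# `scale` (see GlyphVarComponent.compile/decompile below).  Illustrative
+# arithmetic:
+#
+#     fl2fi(90.0 / 180, 12) == 2048   # a rotation of 90 degrees
+#     fl2fi(1.5 / 1, 10) == 1536      # a scaleX of 1.5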
+
+
+class GlyphVarComponent(object):
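+ """Represents a component within a VarComposite (variable composite) glyph,
+ as described in the glyf v1 proposal referenced above.
+
+ Each component references a base glyph by name and carries a design-space
+ ``location`` (a dict mapping axis tags to normalized axis values) plus a
+ decomposed ``transform`` (translation, rotation, scale, skew, transform
+ center).
+ """
+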
+ MIN_SIZE = 5
+
+ def __init__(self):
+ self.location = {}
+ self.transform = DecomposedTransform()
+
+ @staticmethod
+ def getSize(data):
+ size = 5
+ flags = struct.unpack(">H", data[:2])[0]
+ numAxes = int(data[2])
+
+ if flags & VarComponentFlags.GID_IS_24BIT:
+ size += 1
+
+ size += numAxes
+ if flags & VarComponentFlags.AXIS_INDICES_ARE_SHORT:
+ size += 2 * numAxes
+ else:
+ axisIndices = array.array("B", data[:numAxes])
+ size += numAxes
+
+ for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ if flags & mapping_values.flag:
+ size += 2
+
+ return size
+
+ def decompile(self, data, glyfTable):
+ flags = struct.unpack(">H", data[:2])[0]
+ self.flags = int(flags)
+ data = data[2:]
+
+ numAxes = int(data[0])
+ data = data[1:]
+
+ if flags & VarComponentFlags.GID_IS_24BIT:
+ glyphID = int(struct.unpack(">L", b"\0" + data[:3])[0])
+ data = data[3:]
+ flags ^= VarComponentFlags.GID_IS_24BIT
+ else:
+ glyphID = int(struct.unpack(">H", data[:2])[0])
+ data = data[2:]
+ self.glyphName = glyfTable.getGlyphName(int(glyphID))
+
+ if flags & VarComponentFlags.AXIS_INDICES_ARE_SHORT:
+ axisIndices = array.array("H", data[: 2 * numAxes])
+ if sys.byteorder != "big":
+ axisIndices.byteswap()
+ data = data[2 * numAxes :]
+ flags ^= VarComponentFlags.AXIS_INDICES_ARE_SHORT
+ else:
+ axisIndices = array.array("B", data[:numAxes])
+ data = data[numAxes:]
+ assert len(axisIndices) == numAxes
+ axisIndices = list(axisIndices)
+
+ axisValues = array.array("h", data[: 2 * numAxes])
+ if sys.byteorder != "big":
+ axisValues.byteswap()
+ data = data[2 * numAxes :]
+ assert len(axisValues) == numAxes
+ axisValues = [fi2fl(v, 14) for v in axisValues]
+
+ self.location = {
+ glyfTable.axisTags[i]: v for i, v in zip(axisIndices, axisValues)
+ }
+
+ def read_transform_component(data, values):
+ if flags & values.flag:
+ return (
+ data[2:],
+ fi2fl(struct.unpack(">h", data[:2])[0], values.fractionalBits)
+ * values.scale,
+ )
+ else:
+ return data, values.defaultValue
+
+ for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ data, value = read_transform_component(data, mapping_values)
+ setattr(self.transform, attr_name, value)
+
+ if flags & VarComponentFlags.UNIFORM_SCALE:
+ if flags & VarComponentFlags.HAVE_SCALE_X and not (
+ flags & VarComponentFlags.HAVE_SCALE_Y
+ ):
+ self.transform.scaleY = self.transform.scaleX
+ flags |= VarComponentFlags.HAVE_SCALE_Y
+ flags ^= VarComponentFlags.UNIFORM_SCALE
+
+ return data
+
+ def compile(self, glyfTable):
+ data = b""
+
+ if not hasattr(self, "flags"):
+ flags = 0
+ # Calculate optimal transform component flags
+ for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ value = getattr(self.transform, attr_name)
+ if fl2fi(value / mapping.scale, mapping.fractionalBits) != fl2fi(
+ mapping.defaultValue / mapping.scale, mapping.fractionalBits
+ ):
+ flags |= mapping.flag
+ else:
+ flags = self.flags
+
+ if (
+ flags & VarComponentFlags.HAVE_SCALE_X
+ and flags & VarComponentFlags.HAVE_SCALE_Y
+ and fl2fi(self.transform.scaleX, 10) == fl2fi(self.transform.scaleY, 10)
+ ):
+ flags |= VarComponentFlags.UNIFORM_SCALE
+ flags ^= VarComponentFlags.HAVE_SCALE_Y
+
+ numAxes = len(self.location)
+
+ data = data + struct.pack(">B", numAxes)
+
+ glyphID = glyfTable.getGlyphID(self.glyphName)
+ if glyphID > 65535:
+ flags |= VarComponentFlags.GID_IS_24BIT
+ data = data + struct.pack(">L", glyphID)[1:]
+ else:
+ data = data + struct.pack(">H", glyphID)
+
+ axisIndices = [glyfTable.axisTags.index(tag) for tag in self.location.keys()]
+ if all(a <= 255 for a in axisIndices):
+ axisIndices = array.array("B", axisIndices)
+ else:
+ axisIndices = array.array("H", axisIndices)
+ if sys.byteorder != "big":
+ axisIndices.byteswap()
+ flags |= VarComponentFlags.AXIS_INDICES_ARE_SHORT
+ data = data + bytes(axisIndices)
+
+ axisValues = self.location.values()
+ axisValues = array.array("h", (fl2fi(v, 14) for v in axisValues))
+ if sys.byteorder != "big":
+ axisValues.byteswap()
+ data = data + bytes(axisValues)
+
+ def write_transform_component(data, value, values):
+ if flags & values.flag:
+ return data + struct.pack(
+ ">h", fl2fi(value / values.scale, values.fractionalBits)
+ )
+ else:
+ return data
+
+ for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ value = getattr(self.transform, attr_name)
+ data = write_transform_component(data, value, mapping_values)
+
+ return struct.pack(">H", flags) + data
+
+ def toXML(self, writer, ttFont):
+ attrs = [("glyphName", self.glyphName)]
+
+ if hasattr(self, "flags"):
+ attrs = attrs + [("flags", hex(self.flags))]
+
+ for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ v = getattr(self.transform, attr_name)
+ if v != mapping.defaultValue:
+ attrs.append((attr_name, fl2str(v, mapping.fractionalBits)))
+
+ writer.begintag("varComponent", attrs)
+ writer.newline()
+
+ writer.begintag("location")
+ writer.newline()
+ for tag, v in self.location.items():
+ writer.simpletag("axis", [("tag", tag), ("value", fl2str(v, 14))])
+ writer.newline()
+ writer.endtag("location")
+ writer.newline()
+
+ writer.endtag("varComponent")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.glyphName = attrs["glyphName"]
+
+ if "flags" in attrs:
+ self.flags = safeEval(attrs["flags"])
+
+ for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ if attr_name not in attrs:
+ continue
+ v = str2fl(safeEval(attrs[attr_name]), mapping.fractionalBits)
+ setattr(self.transform, attr_name, v)
+
+ for c in content:
+ if not isinstance(c, tuple):
+ continue
+ name, attrs, content = c
+ if name != "location":
+ continue
+ for c in content:
+ if not isinstance(c, tuple):
+ continue
+ name, attrs, content = c
+ assert name == "axis"
+ assert not content
+ self.location[attrs["tag"]] = str2fl(safeEval(attrs["value"]), 14)
+
+ def getPointCount(self):
+ assert hasattr(self, "flags"), "VarComponent with variations must have flags"
+
+ count = 0
+
+ if self.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ count += len(self.location)
+
+ if self.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ count += 1
+ if self.flags & VarComponentFlags.HAVE_ROTATION:
+ count += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ count += 1
+ if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y):
+ count += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ count += 1
+
+ return count
+
+ def getCoordinatesAndControls(self):
+ coords = []
+ controls = []
+
+ if self.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ for tag, v in self.location.items():
+ controls.append(tag)
+ coords.append((fl2fi(v, 14), 0))
+
+ if self.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ controls.append("translate")
+ coords.append((self.transform.translateX, self.transform.translateY))
+ if self.flags & VarComponentFlags.HAVE_ROTATION:
+ controls.append("rotation")
+ coords.append((fl2fi(self.transform.rotation / 180, 12), 0))
+ if self.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ controls.append("scale")
+ coords.append(
+ (fl2fi(self.transform.scaleX, 10), fl2fi(self.transform.scaleY, 10))
+ )
+ if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y):
+ controls.append("skew")
+ coords.append(
+ (
+ fl2fi(self.transform.skewX / -180, 12),
+ fl2fi(self.transform.skewY / 180, 12),
+ )
+ )
+ if self.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ controls.append("tCenter")
+ coords.append((self.transform.tCenterX, self.transform.tCenterY))
+
+ return coords, controls
+
+ def setCoordinates(self, coords):
+ i = 0
+
+ if self.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ newLocation = {}
+ for tag in self.location:
+ newLocation[tag] = fi2fl(coords[i][0], 14)
+ i += 1
+ self.location = newLocation
+
+ self.transform = DecomposedTransform()
+ if self.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ self.transform.translateX, self.transform.translateY = coords[i]
+ i += 1
+ if self.flags & VarComponentFlags.HAVE_ROTATION:
+ self.transform.rotation = fi2fl(coords[i][0], 12) * 180
+ i += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ self.transform.scaleX, self.transform.scaleY = fi2fl(
+ coords[i][0], 10
+ ), fi2fl(coords[i][1], 10)
+ i += 1
+ if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y):
+ self.transform.skewX, self.transform.skewY = (
+ fi2fl(coords[i][0], 12) * -180,
+ fi2fl(coords[i][1], 12) * 180,
+ )
+ i += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ self.transform.tCenterX, self.transform.tCenterY = coords[i]
+ i += 1
+
+ return coords[i:]
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
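+
+
+# Example (illustrative sketch, not part of this change): building a variable
+# component by hand; the glyph name and axis tag are hypothetical.
+#
+#     >>> vc = GlyphVarComponent()
+#     >>> vc.glyphName = "dotaccentcomb"
+#     >>> vc.location = {"wght": 0.25}   # normalized axis coordinates
+#     >>> vc.transform.translateX = 120
+#     >>> vc.transform.rotation = 30.0   # degrees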
-class GlyphComponent(object):
- """Represents a component within a composite glyph.
-
- The component is represented internally with four attributes: ``glyphName``,
- ``x``, ``y`` and ``transform``. If there is no "two-by-two" matrix (i.e
- no scaling, reflection, or rotation; only translation), the ``transform``
- attribute is not present.
- """
- # The above documentation is not *completely* true, but is *true enough* because
- # the rare firstPt/lastPt attributes are not totally supported and nobody seems to
- # mind - see below.
-
- def __init__(self):
- pass
-
- def getComponentInfo(self):
- """Return information about the component
-
- This method returns a tuple of two values: the glyph name of the component's
- base glyph, and a transformation matrix. As opposed to accessing the attributes
- directly, ``getComponentInfo`` always returns a six-element tuple of the
- component's transformation matrix, even when the two-by-two ``.transform``
- matrix is not present.
- """
- # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement
- # something equivalent in fontTools.objects.glyph (I'd rather not
- # convert it to an absolute offset, since it is valuable information).
- # This method will now raise "AttributeError: x" on glyphs that use
- # this TT feature.
- if hasattr(self, "transform"):
- [[xx, xy], [yx, yy]] = self.transform
- trans = (xx, xy, yx, yy, self.x, self.y)
- else:
- trans = (1, 0, 0, 1, self.x, self.y)
- return self.glyphName, trans
-
- def decompile(self, data, glyfTable):
- flags, glyphID = struct.unpack(">HH", data[:4])
- self.flags = int(flags)
- glyphID = int(glyphID)
- self.glyphName = glyfTable.getGlyphName(int(glyphID))
- data = data[4:]
-
- if self.flags & ARG_1_AND_2_ARE_WORDS:
- if self.flags & ARGS_ARE_XY_VALUES:
- self.x, self.y = struct.unpack(">hh", data[:4])
- else:
- x, y = struct.unpack(">HH", data[:4])
- self.firstPt, self.secondPt = int(x), int(y)
- data = data[4:]
- else:
- if self.flags & ARGS_ARE_XY_VALUES:
- self.x, self.y = struct.unpack(">bb", data[:2])
- else:
- x, y = struct.unpack(">BB", data[:2])
- self.firstPt, self.secondPt = int(x), int(y)
- data = data[2:]
-
- if self.flags & WE_HAVE_A_SCALE:
- scale, = struct.unpack(">h", data[:2])
- self.transform = [[fi2fl(scale,14), 0], [0, fi2fl(scale,14)]] # fixed 2.14
- data = data[2:]
- elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE:
- xscale, yscale = struct.unpack(">hh", data[:4])
- self.transform = [[fi2fl(xscale,14), 0], [0, fi2fl(yscale,14)]] # fixed 2.14
- data = data[4:]
- elif self.flags & WE_HAVE_A_TWO_BY_TWO:
- (xscale, scale01,
- scale10, yscale) = struct.unpack(">hhhh", data[:8])
- self.transform = [[fi2fl(xscale,14), fi2fl(scale01,14)],
- [fi2fl(scale10,14), fi2fl(yscale,14)]] # fixed 2.14
- data = data[8:]
- more = self.flags & MORE_COMPONENTS
- haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS
- self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS |
- SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
- NON_OVERLAPPING | OVERLAP_COMPOUND)
- return more, haveInstructions, data
-
- def compile(self, more, haveInstructions, glyfTable):
- data = b""
-
- # reset all flags we will calculate ourselves
- flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS |
- SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
- NON_OVERLAPPING | OVERLAP_COMPOUND)
- if more:
- flags = flags | MORE_COMPONENTS
- if haveInstructions:
- flags = flags | WE_HAVE_INSTRUCTIONS
-
- if hasattr(self, "firstPt"):
- if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255):
- data = data + struct.pack(">BB", self.firstPt, self.secondPt)
- else:
- data = data + struct.pack(">HH", self.firstPt, self.secondPt)
- flags = flags | ARG_1_AND_2_ARE_WORDS
- else:
- x = otRound(self.x)
- y = otRound(self.y)
- flags = flags | ARGS_ARE_XY_VALUES
- if (-128 <= x <= 127) and (-128 <= y <= 127):
- data = data + struct.pack(">bb", x, y)
- else:
- data = data + struct.pack(">hh", x, y)
- flags = flags | ARG_1_AND_2_ARE_WORDS
-
- if hasattr(self, "transform"):
- transform = [[fl2fi(x,14) for x in row] for row in self.transform]
- if transform[0][1] or transform[1][0]:
- flags = flags | WE_HAVE_A_TWO_BY_TWO
- data = data + struct.pack(">hhhh",
- transform[0][0], transform[0][1],
- transform[1][0], transform[1][1])
- elif transform[0][0] != transform[1][1]:
- flags = flags | WE_HAVE_AN_X_AND_Y_SCALE
- data = data + struct.pack(">hh",
- transform[0][0], transform[1][1])
- else:
- flags = flags | WE_HAVE_A_SCALE
- data = data + struct.pack(">h",
- transform[0][0])
-
- glyphID = glyfTable.getGlyphID(self.glyphName)
- return struct.pack(">HH", flags, glyphID) + data
-
- def toXML(self, writer, ttFont):
- attrs = [("glyphName", self.glyphName)]
- if not hasattr(self, "firstPt"):
- attrs = attrs + [("x", self.x), ("y", self.y)]
- else:
- attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)]
-
- if hasattr(self, "transform"):
- transform = self.transform
- if transform[0][1] or transform[1][0]:
- attrs = attrs + [
- ("scalex", fl2str(transform[0][0], 14)),
- ("scale01", fl2str(transform[0][1], 14)),
- ("scale10", fl2str(transform[1][0], 14)),
- ("scaley", fl2str(transform[1][1], 14)),
- ]
- elif transform[0][0] != transform[1][1]:
- attrs = attrs + [
- ("scalex", fl2str(transform[0][0], 14)),
- ("scaley", fl2str(transform[1][1], 14)),
- ]
- else:
- attrs = attrs + [("scale", fl2str(transform[0][0], 14))]
- attrs = attrs + [("flags", hex(self.flags))]
- writer.simpletag("component", attrs)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.glyphName = attrs["glyphName"]
- if "firstPt" in attrs:
- self.firstPt = safeEval(attrs["firstPt"])
- self.secondPt = safeEval(attrs["secondPt"])
- else:
- self.x = safeEval(attrs["x"])
- self.y = safeEval(attrs["y"])
- if "scale01" in attrs:
- scalex = str2fl(attrs["scalex"], 14)
- scale01 = str2fl(attrs["scale01"], 14)
- scale10 = str2fl(attrs["scale10"], 14)
- scaley = str2fl(attrs["scaley"], 14)
- self.transform = [[scalex, scale01], [scale10, scaley]]
- elif "scalex" in attrs:
- scalex = str2fl(attrs["scalex"], 14)
- scaley = str2fl(attrs["scaley"], 14)
- self.transform = [[scalex, 0], [0, scaley]]
- elif "scale" in attrs:
- scale = str2fl(attrs["scale"], 14)
- self.transform = [[scale, 0], [0, scale]]
- self.flags = safeEval(attrs["flags"])
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
class GlyphCoordinates(object):
- """A list of glyph coordinates.
-
- Unlike an ordinary list, this is a numpy-like matrix object which supports
- matrix addition, scalar multiplication and other operations described below.
- """
- def __init__(self, iterable=[]):
- self._a = array.array('d')
- self.extend(iterable)
-
- @property
- def array(self):
- """Returns the underlying array of coordinates"""
- return self._a
-
- @staticmethod
- def zeros(count):
- """Creates a new ``GlyphCoordinates`` object with all coordinates set to (0,0)"""
- g = GlyphCoordinates()
- g._a.frombytes(bytes(count * 2 * g._a.itemsize))
- return g
-
- def copy(self):
- """Creates a new ``GlyphCoordinates`` object which is a copy of the current one."""
- c = GlyphCoordinates()
- c._a.extend(self._a)
- return c
-
- def __len__(self):
- """Returns the number of coordinates in the array."""
- return len(self._a) // 2
-
- def __getitem__(self, k):
- """Returns a two element tuple (x,y)"""
- if isinstance(k, slice):
- indices = range(*k.indices(len(self)))
- return [self[i] for i in indices]
- a = self._a
- x = a[2*k]
- y = a[2*k+1]
- return (int(x) if x.is_integer() else x,
- int(y) if y.is_integer() else y)
-
- def __setitem__(self, k, v):
- """Sets a point's coordinates to a two element tuple (x,y)"""
- if isinstance(k, slice):
- indices = range(*k.indices(len(self)))
- # XXX This only works if len(v) == len(indices)
- for j,i in enumerate(indices):
- self[i] = v[j]
- return
- self._a[2*k],self._a[2*k+1] = v
-
- def __delitem__(self, i):
- """Removes a point from the list"""
- i = (2*i) % len(self._a)
- del self._a[i]
- del self._a[i]
-
- def __repr__(self):
- return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])'
-
- def append(self, p):
- self._a.extend(tuple(p))
-
- def extend(self, iterable):
- for p in iterable:
- self._a.extend(p)
-
- def toInt(self, *, round=otRound):
- a = self._a
- for i in range(len(a)):
- a[i] = round(a[i])
-
- def relativeToAbsolute(self):
- a = self._a
- x,y = 0,0
- for i in range(0, len(a), 2):
- a[i ] = x = a[i ] + x
- a[i+1] = y = a[i+1] + y
-
- def absoluteToRelative(self):
- a = self._a
- x,y = 0,0
- for i in range(0, len(a), 2):
- nx = a[i ]
- ny = a[i+1]
- a[i] = nx - x
- a[i+1] = ny - y
- x = nx
- y = ny
-
- def translate(self, p):
- """
- >>> GlyphCoordinates([(1,2)]).translate((.5,0))
- """
- x,y = p
- if x == 0 and y == 0:
- return
- a = self._a
- for i in range(0, len(a), 2):
- a[i] += x
- a[i+1] += y
-
- def scale(self, p):
- """
- >>> GlyphCoordinates([(1,2)]).scale((.5,0))
- """
- x,y = p
- if x == 1 and y == 1:
- return
- a = self._a
- for i in range(0, len(a), 2):
- a[i] *= x
- a[i+1] *= y
-
- def transform(self, t):
- """
- >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5)))
- """
- a = self._a
- for i in range(0, len(a), 2):
- x = a[i ]
- y = a[i+1]
- px = x * t[0][0] + y * t[1][0]
- py = x * t[0][1] + y * t[1][1]
- a[i] = px
- a[i+1] = py
-
- def __eq__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g2 = GlyphCoordinates([(1.0,2)])
- >>> g3 = GlyphCoordinates([(1.5,2)])
- >>> g == g2
- True
- >>> g == g3
- False
- >>> g2 == g3
- False
- """
- if type(self) != type(other):
- return NotImplemented
- return self._a == other._a
-
- def __ne__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g2 = GlyphCoordinates([(1.0,2)])
- >>> g3 = GlyphCoordinates([(1.5,2)])
- >>> g != g2
- False
- >>> g != g3
- True
- >>> g2 != g3
- True
- """
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- # Math operations
-
- def __pos__(self):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- >>> g2 = +g
- >>> g2
- GlyphCoordinates([(1, 2)])
- >>> g2.translate((1,0))
- >>> g2
- GlyphCoordinates([(2, 2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- """
- return self.copy()
- def __neg__(self):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- >>> g2 = -g
- >>> g2
- GlyphCoordinates([(-1, -2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- """
- r = self.copy()
- a = r._a
- for i in range(len(a)):
- a[i] = -a[i]
- return r
- def __round__(self, *, round=otRound):
- r = self.copy()
- r.toInt(round=round)
- return r
-
- def __add__(self, other): return self.copy().__iadd__(other)
- def __sub__(self, other): return self.copy().__isub__(other)
- def __mul__(self, other): return self.copy().__imul__(other)
- def __truediv__(self, other): return self.copy().__itruediv__(other)
-
- __radd__ = __add__
- __rmul__ = __mul__
- def __rsub__(self, other): return other + (-self)
-
- def __iadd__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g += (.5,0)
- >>> g
- GlyphCoordinates([(1.5, 2)])
- >>> g2 = GlyphCoordinates([(3,4)])
- >>> g += g2
- >>> g
- GlyphCoordinates([(4.5, 6)])
- """
- if isinstance(other, tuple):
- assert len(other) == 2
- self.translate(other)
- return self
- if isinstance(other, GlyphCoordinates):
- other = other._a
- a = self._a
- assert len(a) == len(other)
- for i in range(len(a)):
- a[i] += other[i]
- return self
- return NotImplemented
-
- def __isub__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g -= (.5,0)
- >>> g
- GlyphCoordinates([(0.5, 2)])
- >>> g2 = GlyphCoordinates([(3,4)])
- >>> g -= g2
- >>> g
- GlyphCoordinates([(-2.5, -2)])
- """
- if isinstance(other, tuple):
- assert len(other) == 2
- self.translate((-other[0],-other[1]))
- return self
- if isinstance(other, GlyphCoordinates):
- other = other._a
- a = self._a
- assert len(a) == len(other)
- for i in range(len(a)):
- a[i] -= other[i]
- return self
- return NotImplemented
-
- def __imul__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g *= (2,.5)
- >>> g *= 2
- >>> g
- GlyphCoordinates([(4, 2)])
- >>> g = GlyphCoordinates([(1,2)])
- >>> g *= 2
- >>> g
- GlyphCoordinates([(2, 4)])
- """
- if isinstance(other, tuple):
- assert len(other) == 2
- self.scale(other)
- return self
- if isinstance(other, Number):
- if other == 1:
- return self
- a = self._a
- for i in range(len(a)):
- a[i] *= other
- return self
- return NotImplemented
-
- def __itruediv__(self, other):
- """
- >>> g = GlyphCoordinates([(1,3)])
- >>> g /= (.5,1.5)
- >>> g /= 2
- >>> g
- GlyphCoordinates([(1, 1)])
- """
- if isinstance(other, Number):
- other = (other, other)
- if isinstance(other, tuple):
- if other == (1,1):
- return self
- assert len(other) == 2
- self.scale((1./other[0],1./other[1]))
- return self
- return NotImplemented
-
- def __bool__(self):
- """
- >>> g = GlyphCoordinates([])
- >>> bool(g)
- False
- >>> g = GlyphCoordinates([(0,0), (0.,0)])
- >>> bool(g)
- True
- >>> g = GlyphCoordinates([(0,0), (1,0)])
- >>> bool(g)
- True
- >>> g = GlyphCoordinates([(0,.5), (0,0)])
- >>> bool(g)
- True
- """
- return bool(self._a)
-
- __nonzero__ = __bool__
+ """A list of glyph coordinates.
+
+ Unlike an ordinary list, this is a numpy-like matrix object which supports
+ matrix addition, scalar multiplication and other operations described below.
+ """
+
+ def __init__(self, iterable=[]):
+ self._a = array.array("d")
+ self.extend(iterable)
+
+ @property
+ def array(self):
+ """Returns the underlying array of coordinates"""
+ return self._a
+
+ @staticmethod
+ def zeros(count):
+ """Creates a new ``GlyphCoordinates`` object with all coordinates set to (0,0)"""
+ g = GlyphCoordinates()
+ g._a.frombytes(bytes(count * 2 * g._a.itemsize))
+ return g
+
+ def copy(self):
+ """Creates a new ``GlyphCoordinates`` object which is a copy of the current one."""
+ c = GlyphCoordinates()
+ c._a.extend(self._a)
+ return c
+
+ def __len__(self):
+ """Returns the number of coordinates in the array."""
+ return len(self._a) // 2
+
+ def __getitem__(self, k):
+ """Returns a two element tuple (x,y)"""
+ a = self._a
+ if isinstance(k, slice):
+ indices = range(*k.indices(len(self)))
+ # Instead of calling ourselves recursively, duplicate code; faster
+ ret = []
+ for k in indices:
+ x = a[2 * k]
+ y = a[2 * k + 1]
+ ret.append(
+ (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y)
+ )
+ return ret
+ x = a[2 * k]
+ y = a[2 * k + 1]
+ return (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y)
+
+ def __setitem__(self, k, v):
+ """Sets a point's coordinates to a two element tuple (x,y)"""
+ if isinstance(k, slice):
+ indices = range(*k.indices(len(self)))
+ # XXX This only works if len(v) == len(indices)
+ for j, i in enumerate(indices):
+ self[i] = v[j]
+ return
+ self._a[2 * k], self._a[2 * k + 1] = v
+
+ def __delitem__(self, i):
+ """Removes a point from the list"""
+ i = (2 * i) % len(self._a)
+ del self._a[i]
+ del self._a[i]
+
+ def __repr__(self):
+ return "GlyphCoordinates([" + ",".join(str(c) for c in self) + "])"
+
+ def append(self, p):
+ self._a.extend(tuple(p))
+
+ def extend(self, iterable):
+ for p in iterable:
+ self._a.extend(p)
+
+ def toInt(self, *, round=otRound):
+ if round is noRound:
+ return
+ a = self._a
+ for i in range(len(a)):
+ a[i] = round(a[i])
+
+ def calcBounds(self):
+ a = self._a
+ if not a:
+ return 0, 0, 0, 0
+ xs = a[0::2]
+ ys = a[1::2]
+ return min(xs), min(ys), max(xs), max(ys)
+
+ def calcIntBounds(self, round=otRound):
+ return tuple(round(v) for v in self.calcBounds())
+
+ def relativeToAbsolute(self):
+ a = self._a
+ x, y = 0, 0
+ for i in range(0, len(a), 2):
+ a[i] = x = a[i] + x
+ a[i + 1] = y = a[i + 1] + y
+
+ def absoluteToRelative(self):
+ a = self._a
+ x, y = 0, 0
+ for i in range(0, len(a), 2):
+ nx = a[i]
+ ny = a[i + 1]
+ a[i] = nx - x
+ a[i + 1] = ny - y
+ x = nx
+ y = ny
+
+ def translate(self, p):
+ """
+ >>> GlyphCoordinates([(1,2)]).translate((.5,0))
+ """
+ x, y = p
+ if x == 0 and y == 0:
+ return
+ a = self._a
+ for i in range(0, len(a), 2):
+ a[i] += x
+ a[i + 1] += y
+
+ def scale(self, p):
+ """
+ >>> GlyphCoordinates([(1,2)]).scale((.5,0))
+ """
+ x, y = p
+ if x == 1 and y == 1:
+ return
+ a = self._a
+ for i in range(0, len(a), 2):
+ a[i] *= x
+ a[i + 1] *= y
+
+ def transform(self, t):
+ """
+ >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5)))
+ """
+ a = self._a
+ for i in range(0, len(a), 2):
+ x = a[i]
+ y = a[i + 1]
+ px = x * t[0][0] + y * t[1][0]
+ py = x * t[0][1] + y * t[1][1]
+ a[i] = px
+ a[i + 1] = py
+
+ def __eq__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g2 = GlyphCoordinates([(1.0,2)])
+ >>> g3 = GlyphCoordinates([(1.5,2)])
+ >>> g == g2
+ True
+ >>> g == g3
+ False
+ >>> g2 == g3
+ False
+ """
+ if type(self) != type(other):
+ return NotImplemented
+ return self._a == other._a
+
+ def __ne__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g2 = GlyphCoordinates([(1.0,2)])
+ >>> g3 = GlyphCoordinates([(1.5,2)])
+ >>> g != g2
+ False
+ >>> g != g3
+ True
+ >>> g2 != g3
+ True
+ """
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ # Math operations
+
+ def __pos__(self):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ >>> g2 = +g
+ >>> g2
+ GlyphCoordinates([(1, 2)])
+ >>> g2.translate((1,0))
+ >>> g2
+ GlyphCoordinates([(2, 2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ """
+ return self.copy()
+
+ def __neg__(self):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ >>> g2 = -g
+ >>> g2
+ GlyphCoordinates([(-1, -2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ """
+ r = self.copy()
+ a = r._a
+ for i in range(len(a)):
+ a[i] = -a[i]
+ return r
+
+ def __round__(self, *, round=otRound):
+ r = self.copy()
+ r.toInt(round=round)
+ return r
+
+ def __add__(self, other):
+ return self.copy().__iadd__(other)
+
+ def __sub__(self, other):
+ return self.copy().__isub__(other)
+
+ def __mul__(self, other):
+ return self.copy().__imul__(other)
+
+ def __truediv__(self, other):
+ return self.copy().__itruediv__(other)
+
+ __radd__ = __add__
+ __rmul__ = __mul__
+
+ def __rsub__(self, other):
+ return other + (-self)
+
+ def __iadd__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g += (.5,0)
+ >>> g
+ GlyphCoordinates([(1.5, 2)])
+ >>> g2 = GlyphCoordinates([(3,4)])
+ >>> g += g2
+ >>> g
+ GlyphCoordinates([(4.5, 6)])
+ """
+ if isinstance(other, tuple):
+ assert len(other) == 2
+ self.translate(other)
+ return self
+ if isinstance(other, GlyphCoordinates):
+ other = other._a
+ a = self._a
+ assert len(a) == len(other)
+ for i in range(len(a)):
+ a[i] += other[i]
+ return self
+ return NotImplemented
+
+ def __isub__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g -= (.5,0)
+ >>> g
+ GlyphCoordinates([(0.5, 2)])
+ >>> g2 = GlyphCoordinates([(3,4)])
+ >>> g -= g2
+ >>> g
+ GlyphCoordinates([(-2.5, -2)])
+ """
+ if isinstance(other, tuple):
+ assert len(other) == 2
+ self.translate((-other[0], -other[1]))
+ return self
+ if isinstance(other, GlyphCoordinates):
+ other = other._a
+ a = self._a
+ assert len(a) == len(other)
+ for i in range(len(a)):
+ a[i] -= other[i]
+ return self
+ return NotImplemented
+
+ def __imul__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g *= (2,.5)
+ >>> g *= 2
+ >>> g
+ GlyphCoordinates([(4, 2)])
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g *= 2
+ >>> g
+ GlyphCoordinates([(2, 4)])
+ """
+ if isinstance(other, tuple):
+ assert len(other) == 2
+ self.scale(other)
+ return self
+ if isinstance(other, Number):
+ if other == 1:
+ return self
+ a = self._a
+ for i in range(len(a)):
+ a[i] *= other
+ return self
+ return NotImplemented
+
+ def __itruediv__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,3)])
+ >>> g /= (.5,1.5)
+ >>> g /= 2
+ >>> g
+ GlyphCoordinates([(1, 1)])
+ """
+ if isinstance(other, Number):
+ other = (other, other)
+ if isinstance(other, tuple):
+ if other == (1, 1):
+ return self
+ assert len(other) == 2
+ self.scale((1.0 / other[0], 1.0 / other[1]))
+ return self
+ return NotImplemented
+
+ def __bool__(self):
+ """
+ >>> g = GlyphCoordinates([])
+ >>> bool(g)
+ False
+ >>> g = GlyphCoordinates([(0,0), (0.,0)])
+ >>> bool(g)
+ True
+ >>> g = GlyphCoordinates([(0,0), (1,0)])
+ >>> bool(g)
+ True
+ >>> g = GlyphCoordinates([(0,.5), (0,0)])
+ >>> bool(g)
+ True
+ """
+ return bool(self._a)
+
+ __nonzero__ = __bool__
if __name__ == "__main__":
- import doctest, sys
- sys.exit(doctest.testmod().failed)
+ import doctest, sys
+
+ sys.exit(doctest.testmod().failed)
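
The reformatted GlyphCoordinates class above is a small array-backed point list with in-place geometry operations. A minimal usage sketch, assuming the class is imported from the module this hunk touches (fontTools.ttLib.tables._g_l_y_f):

    from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates

    g = GlyphCoordinates([(0, 0), (100, 200)])
    g += (10, 10)                     # translate every point by (10, 10)
    g *= 2                            # uniform scalar multiplication
    g.transform(((1, 0), (0.5, 1)))   # 2x2 matrix: x' = x + 0.5*y, y' = y
    print(g)                          # GlyphCoordinates([(30, 20),(430, 420)])
    print(g.calcBounds())             # (30.0, 20.0, 430.0, 420.0)
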
diff --git a/Lib/fontTools/ttLib/tables/_g_v_a_r.py b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
index dd198f4b..11485bf0 100644
--- a/Lib/fontTools/ttLib/tables/_g_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
@@ -1,3 +1,4 @@
+from collections import UserDict, deque
from functools import partial
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
@@ -37,238 +38,247 @@ GVAR_HEADER_FORMAT = """
GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT)
-class _lazy_dict(dict):
- def get(self, k, *args):
- v = super().get(k, *args)
- if callable(v):
- v = v()
- self[k] = v
- return v
+class _LazyDict(UserDict):
+ def __init__(self, data):
+ super().__init__()
+ self.data = data
def __getitem__(self, k):
- v = super().__getitem__(k)
+ v = self.data[k]
if callable(v):
v = v()
- self[k] = v
+ self.data[k] = v
return v
- def items(self):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().items()
-
- def values(self):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().values()
-
- def __eq__(self, other):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().__eq__(other)
-
- def __neq__(self, other):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().__neq__(other)
-
- def _load(self):
- for k in self:
- self[k]
- self._loaded = True
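
The new _LazyDict above replaces the old dict subclass: values start out as zero-argument callables and are swapped for their result on first access. A standalone sketch of the same pattern (an illustration, not the fontTools class itself):

    from collections import UserDict

    class LazyDict(UserDict):
        def __init__(self, data):
            super().__init__()
            self.data = data

        def __getitem__(self, k):
            v = self.data[k]
            if callable(v):
                v = v()           # materialize on first access
                self.data[k] = v  # cache the concrete value
            return v

    d = LazyDict({"total": lambda: sum(range(1000))})
    print(d["total"])  # 499500, computed now and cached
    print(d["total"])  # served from the cache; the callable is gone
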
class table__g_v_a_r(DefaultTable.DefaultTable):
- dependencies = ["fvar", "glyf"]
-
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.version, self.reserved = 1, 0
- self.variations = {}
-
- def compile(self, ttFont):
- axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- sharedTuples = tv.compileSharedTuples(
- axisTags, itertools.chain(*self.variations.values()))
- sharedTupleIndices = {coord:i for i, coord in enumerate(sharedTuples)}
- sharedTupleSize = sum([len(c) for c in sharedTuples])
- compiledGlyphs = self.compileGlyphs_(
- ttFont, axisTags, sharedTupleIndices)
- offset = 0
- offsets = []
- for glyph in compiledGlyphs:
- offsets.append(offset)
- offset += len(glyph)
- offsets.append(offset)
- compiledOffsets, tableFormat = self.compileOffsets_(offsets)
-
- header = {}
- header["version"] = self.version
- header["reserved"] = self.reserved
- header["axisCount"] = len(axisTags)
- header["sharedTupleCount"] = len(sharedTuples)
- header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
- header["glyphCount"] = len(compiledGlyphs)
- header["flags"] = tableFormat
- header["offsetToGlyphVariationData"] = header["offsetToSharedTuples"] + sharedTupleSize
- compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)
-
- result = [compiledHeader, compiledOffsets]
- result.extend(sharedTuples)
- result.extend(compiledGlyphs)
- return b''.join(result)
-
- def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
- result = []
- glyf = ttFont['glyf']
- for glyphName in ttFont.getGlyphOrder():
- glyph = glyf[glyphName]
- pointCount = self.getNumPoints_(glyph)
- variations = self.variations.get(glyphName, [])
- result.append(compileGlyph_(variations, pointCount,
- axisTags, sharedCoordIndices))
- return result
-
- def decompile(self, data, ttFont):
- axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- glyphs = ttFont.getGlyphOrder()
- sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
- assert len(glyphs) == self.glyphCount
- assert len(axisTags) == self.axisCount
- offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount)
- sharedCoords = tv.decompileSharedTuples(
- axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples)
- self.variations = _lazy_dict()
- offsetToData = self.offsetToGlyphVariationData
- glyf = ttFont['glyf']
-
- def decompileVarGlyph(glyphName, gid):
- glyph = glyf[glyphName]
- numPointsInGlyph = self.getNumPoints_(glyph)
- gvarData = data[offsetToData + offsets[gid] : offsetToData + offsets[gid + 1]]
- return decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData)
-
- for gid in range(self.glyphCount):
- glyphName = glyphs[gid]
- self.variations[glyphName] = partial(decompileVarGlyph, glyphName, gid)
-
- @staticmethod
- def decompileOffsets_(data, tableFormat, glyphCount):
- if tableFormat == 0:
- # Short format: array of UInt16
- offsets = array.array("H")
- offsetsSize = (glyphCount + 1) * 2
- else:
- # Long format: array of UInt32
- offsets = array.array("I")
- offsetsSize = (glyphCount + 1) * 4
- offsets.frombytes(data[0 : offsetsSize])
- if sys.byteorder != "big": offsets.byteswap()
-
- # In the short format, offsets need to be multiplied by 2.
- # This is not documented in Apple's TrueType specification,
- # but can be inferred from the FreeType implementation, and
- # we could verify it with two sample GX fonts.
- if tableFormat == 0:
- offsets = [off * 2 for off in offsets]
-
- return offsets
-
- @staticmethod
- def compileOffsets_(offsets):
- """Packs a list of offsets into a 'gvar' offset table.
-
- Returns a pair (bytestring, tableFormat). Bytestring is the
- packed offset table. Format indicates whether the table
- uses short (tableFormat=0) or long (tableFormat=1) integers.
- The returned tableFormat should get packed into the flags field
- of the 'gvar' header.
- """
- assert len(offsets) >= 2
- for i in range(1, len(offsets)):
- assert offsets[i - 1] <= offsets[i]
- if max(offsets) <= 0xffff * 2:
- packed = array.array("H", [n >> 1 for n in offsets])
- tableFormat = 0
- else:
- packed = array.array("I", offsets)
- tableFormat = 1
- if sys.byteorder != "big": packed.byteswap()
- return (packed.tobytes(), tableFormat)
-
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.simpletag("reserved", value=self.reserved)
- writer.newline()
- axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- for glyphName in ttFont.getGlyphNames():
- variations = self.variations.get(glyphName)
- if not variations:
- continue
- writer.begintag("glyphVariations", glyph=glyphName)
- writer.newline()
- for gvar in variations:
- gvar.toXML(writer, axisTags)
- writer.endtag("glyphVariations")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version":
- self.version = safeEval(attrs["value"])
- elif name == "reserved":
- self.reserved = safeEval(attrs["value"])
- elif name == "glyphVariations":
- if not hasattr(self, "variations"):
- self.variations = {}
- glyphName = attrs["glyph"]
- glyph = ttFont["glyf"][glyphName]
- numPointsInGlyph = self.getNumPoints_(glyph)
- glyphVariations = []
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- if name == "tuple":
- gvar = TupleVariation({}, [None] * numPointsInGlyph)
- glyphVariations.append(gvar)
- for tupleElement in content:
- if isinstance(tupleElement, tuple):
- tupleName, tupleAttrs, tupleContent = tupleElement
- gvar.fromXML(tupleName, tupleAttrs, tupleContent)
- self.variations[glyphName] = glyphVariations
-
- @staticmethod
- def getNumPoints_(glyph):
- NUM_PHANTOM_POINTS = 4
- if glyph.isComposite():
- return len(glyph.components) + NUM_PHANTOM_POINTS
- else:
- # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute.
- return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
+ dependencies = ["fvar", "glyf"]
+
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.version, self.reserved = 1, 0
+ self.variations = {}
+
+ def compile(self, ttFont):
+ axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
+ sharedTuples = tv.compileSharedTuples(
+ axisTags, itertools.chain(*self.variations.values())
+ )
+ sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)}
+ sharedTupleSize = sum([len(c) for c in sharedTuples])
+ compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedTupleIndices)
+ offset = 0
+ offsets = []
+ for glyph in compiledGlyphs:
+ offsets.append(offset)
+ offset += len(glyph)
+ offsets.append(offset)
+ compiledOffsets, tableFormat = self.compileOffsets_(offsets)
+
+ header = {}
+ header["version"] = self.version
+ header["reserved"] = self.reserved
+ header["axisCount"] = len(axisTags)
+ header["sharedTupleCount"] = len(sharedTuples)
+ header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
+ header["glyphCount"] = len(compiledGlyphs)
+ header["flags"] = tableFormat
+ header["offsetToGlyphVariationData"] = (
+ header["offsetToSharedTuples"] + sharedTupleSize
+ )
+ compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)
+
+ result = [compiledHeader, compiledOffsets]
+ result.extend(sharedTuples)
+ result.extend(compiledGlyphs)
+ return b"".join(result)
+
+ def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
+ result = []
+ glyf = ttFont["glyf"]
+ for glyphName in ttFont.getGlyphOrder():
+ variations = self.variations.get(glyphName, [])
+ if not variations:
+ result.append(b"")
+ continue
+ pointCountUnused = 0  # pointCount is actually unused by compileGlyph_
+ result.append(
+ compileGlyph_(
+ variations, pointCountUnused, axisTags, sharedCoordIndices
+ )
+ )
+ return result
+
+ def decompile(self, data, ttFont):
+ axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
+ glyphs = ttFont.getGlyphOrder()
+ sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
+ assert len(glyphs) == self.glyphCount
+ assert len(axisTags) == self.axisCount
+ offsets = self.decompileOffsets_(
+ data[GVAR_HEADER_SIZE:],
+ tableFormat=(self.flags & 1),
+ glyphCount=self.glyphCount,
+ )
+ sharedCoords = tv.decompileSharedTuples(
+ axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples
+ )
+ variations = {}
+ offsetToData = self.offsetToGlyphVariationData
+ glyf = ttFont["glyf"]
+
+ def decompileVarGlyph(glyphName, gid):
+ gvarData = data[
+ offsetToData + offsets[gid] : offsetToData + offsets[gid + 1]
+ ]
+ if not gvarData:
+ return []
+ glyph = glyf[glyphName]
+ numPointsInGlyph = self.getNumPoints_(glyph)
+ return decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData)
+
+ for gid in range(self.glyphCount):
+ glyphName = glyphs[gid]
+ variations[glyphName] = partial(decompileVarGlyph, glyphName, gid)
+ self.variations = _LazyDict(variations)
+
+ if ttFont.lazy is False: # Be lazy for None and True
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ # Use a zero-length deque to consume the lazy dict
+ deque(self.variations.values(), maxlen=0)
+
+ @staticmethod
+ def decompileOffsets_(data, tableFormat, glyphCount):
+ if tableFormat == 0:
+ # Short format: array of UInt16
+ offsets = array.array("H")
+ offsetsSize = (glyphCount + 1) * 2
+ else:
+ # Long format: array of UInt32
+ offsets = array.array("I")
+ offsetsSize = (glyphCount + 1) * 4
+ offsets.frombytes(data[0:offsetsSize])
+ if sys.byteorder != "big":
+ offsets.byteswap()
+
+ # In the short format, offsets need to be multiplied by 2.
+ # This is not documented in Apple's TrueType specification,
+ # but can be inferred from the FreeType implementation, and
+ # we could verify it with two sample GX fonts.
+ if tableFormat == 0:
+ offsets = [off * 2 for off in offsets]
+
+ return offsets
+
+ @staticmethod
+ def compileOffsets_(offsets):
+ """Packs a list of offsets into a 'gvar' offset table.
+
+ Returns a pair (bytestring, tableFormat). Bytestring is the
+ packed offset table. Format indicates whether the table
+ uses short (tableFormat=0) or long (tableFormat=1) integers.
+ The returned tableFormat should get packed into the flags field
+ of the 'gvar' header.
+ """
+ assert len(offsets) >= 2
+ for i in range(1, len(offsets)):
+ assert offsets[i - 1] <= offsets[i]
+ if max(offsets) <= 0xFFFF * 2:
+ packed = array.array("H", [n >> 1 for n in offsets])
+ tableFormat = 0
+ else:
+ packed = array.array("I", offsets)
+ tableFormat = 1
+ if sys.byteorder != "big":
+ packed.byteswap()
+ return (packed.tobytes(), tableFormat)
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("reserved", value=self.reserved)
+ writer.newline()
+ axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
+ for glyphName in ttFont.getGlyphNames():
+ variations = self.variations.get(glyphName)
+ if not variations:
+ continue
+ writer.begintag("glyphVariations", glyph=glyphName)
+ writer.newline()
+ for gvar in variations:
+ gvar.toXML(writer, axisTags)
+ writer.endtag("glyphVariations")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ elif name == "reserved":
+ self.reserved = safeEval(attrs["value"])
+ elif name == "glyphVariations":
+ if not hasattr(self, "variations"):
+ self.variations = {}
+ glyphName = attrs["glyph"]
+ glyph = ttFont["glyf"][glyphName]
+ numPointsInGlyph = self.getNumPoints_(glyph)
+ glyphVariations = []
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ if name == "tuple":
+ gvar = TupleVariation({}, [None] * numPointsInGlyph)
+ glyphVariations.append(gvar)
+ for tupleElement in content:
+ if isinstance(tupleElement, tuple):
+ tupleName, tupleAttrs, tupleContent = tupleElement
+ gvar.fromXML(tupleName, tupleAttrs, tupleContent)
+ self.variations[glyphName] = glyphVariations
+
+ @staticmethod
+ def getNumPoints_(glyph):
+ NUM_PHANTOM_POINTS = 4
+
+ if glyph.isComposite():
+ return len(glyph.components) + NUM_PHANTOM_POINTS
+ elif glyph.isVarComposite():
+ count = 0
+ for component in glyph.components:
+ count += component.getPointCount()
+ return count + NUM_PHANTOM_POINTS
+ else:
+ # Empty glyphs (e.g. space, nonmarkingreturn) have no "coordinates" attribute.
+ return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
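
The two offset helpers above are static, so they can be exercised without a font. A small round-trip sketch of the short (UInt16, halved) offset format, assuming the class import below:

    from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r

    offsets = [0, 8, 8, 24]  # non-decreasing, even values (gvar data is 2-byte padded)
    packed, fmt = table__g_v_a_r.compileOffsets_(offsets)
    assert fmt == 0          # short format: each offset is stored as offset >> 1
    unpacked = table__g_v_a_r.decompileOffsets_(
        packed, tableFormat=fmt, glyphCount=len(offsets) - 1
    )
    assert list(unpacked) == offsets
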
def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices):
- tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
- variations, pointCount, axisTags, sharedCoordIndices)
- if tupleVariationCount == 0:
- return b""
- result = [
- struct.pack(">HH", tupleVariationCount, 4 + len(tuples)),
- tuples,
- data
- ]
- if (len(tuples) + len(data)) % 2 != 0:
- result.append(b"\0") # padding
- return b''.join(result)
+ tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
+ variations, pointCount, axisTags, sharedCoordIndices
+ )
+ if tupleVariationCount == 0:
+ return b""
+ result = [struct.pack(">HH", tupleVariationCount, 4 + len(tuples)), tuples, data]
+ if (len(tuples) + len(data)) % 2 != 0:
+ result.append(b"\0") # padding
+ return b"".join(result)
def decompileGlyph_(pointCount, sharedTuples, axisTags, data):
- if len(data) < 4:
- return []
- tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
- dataPos = offsetToData
- return tv.decompileTupleVariationStore(
- "gvar", axisTags,
- tupleVariationCount, pointCount,
- sharedTuples, data, 4, offsetToData
- )
+ if len(data) < 4:
+ return []
+ tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
+ dataPos = offsetToData
+ return tv.decompileTupleVariationStore(
+ "gvar",
+ axisTags,
+ tupleVariationCount,
+ pointCount,
+ sharedTuples,
+ data,
+ 4,
+ offsetToData,
+ )
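
compileGlyph_ and decompileGlyph_ above frame each glyph's variation data with a four-byte header. A trivial sketch of that header layout with made-up values:

    import struct

    header = struct.pack(">HH", 2, 8)  # 2 tuple variations, serialized data at offset 8
    tupleVariationCount, offsetToData = struct.unpack(">HH", header)
    print(tupleVariationCount, offsetToData)  # 2 8
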
diff --git a/Lib/fontTools/ttLib/tables/_h_d_m_x.py b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
index 9f860d2a..b6d56a7e 100644
--- a/Lib/fontTools/ttLib/tables/_h_d_m_x.py
+++ b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
@@ -11,106 +11,109 @@ hdmxHeaderFormat = """
recordSize: l
"""
+
class _GlyphnamedList(Mapping):
+ def __init__(self, reverseGlyphOrder, data):
+ self._array = data
+ self._map = dict(reverseGlyphOrder)
- def __init__(self, reverseGlyphOrder, data):
- self._array = data
- self._map = dict(reverseGlyphOrder)
+ def __getitem__(self, k):
+ return self._array[self._map[k]]
- def __getitem__(self, k):
- return self._array[self._map[k]]
+ def __len__(self):
+ return len(self._map)
- def __len__(self):
- return len(self._map)
+ def __iter__(self):
+ return iter(self._map)
- def __iter__(self):
- return iter(self._map)
+ def keys(self):
+ return self._map.keys()
- def keys(self):
- return self._map.keys()
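
_GlyphnamedList above wraps an array indexed by glyph ID as a read-only mapping keyed by glyph name. A usage sketch with a made-up reverse glyph map (the class is private, but importable from the module this hunk touches):

    import array
    from fontTools.ttLib.tables._h_d_m_x import _GlyphnamedList

    widths = _GlyphnamedList({".notdef": 0, "space": 1, "A": 2},
                             array.array("B", [0, 5, 12]))
    print(widths["A"])     # 12
    print(len(widths))     # 3
    print(sorted(widths))  # ['.notdef', 'A', 'space']
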
class table__h_d_m_x(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
+ self.hdmx = {}
+ for i in range(self.numRecords):
+ ppem = byteord(data[0])
+ maxSize = byteord(data[1])
+ widths = _GlyphnamedList(
+ ttFont.getReverseGlyphMap(), array.array("B", data[2 : 2 + numGlyphs])
+ )
+ self.hdmx[ppem] = widths
+ data = data[self.recordSize :]
+ assert len(data) == 0, "too much hdmx data"
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
- self.hdmx = {}
- for i in range(self.numRecords):
- ppem = byteord(data[0])
- maxSize = byteord(data[1])
- widths = _GlyphnamedList(ttFont.getReverseGlyphMap(), array.array("B", data[2:2+numGlyphs]))
- self.hdmx[ppem] = widths
- data = data[self.recordSize:]
- assert len(data) == 0, "too much hdmx data"
+ def compile(self, ttFont):
+ self.version = 0
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
+ pad = (self.recordSize - 2 - numGlyphs) * b"\0"
+ self.numRecords = len(self.hdmx)
+ data = sstruct.pack(hdmxHeaderFormat, self)
+ items = sorted(self.hdmx.items())
+ for ppem, widths in items:
+ data = data + bytechr(ppem) + bytechr(max(widths.values()))
+ for glyphID in range(len(glyphOrder)):
+ width = widths[glyphOrder[glyphID]]
+ data = data + bytechr(width)
+ data = data + pad
+ return data
- def compile(self, ttFont):
- self.version = 0
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
- pad = (self.recordSize - 2 - numGlyphs) * b"\0"
- self.numRecords = len(self.hdmx)
- data = sstruct.pack(hdmxHeaderFormat, self)
- items = sorted(self.hdmx.items())
- for ppem, widths in items:
- data = data + bytechr(ppem) + bytechr(max(widths.values()))
- for glyphID in range(len(glyphOrder)):
- width = widths[glyphOrder[glyphID]]
- data = data + bytechr(width)
- data = data + pad
- return data
+ def toXML(self, writer, ttFont):
+ writer.begintag("hdmxData")
+ writer.newline()
+ ppems = sorted(self.hdmx.keys())
+ records = []
+ format = ""
+ for ppem in ppems:
+ widths = self.hdmx[ppem]
+ records.append(widths)
+ format = format + "%4d"
+ glyphNames = ttFont.getGlyphOrder()[:]
+ glyphNames.sort()
+ maxNameLen = max(map(len, glyphNames))
+ format = "%" + repr(maxNameLen) + "s:" + format + " ;"
+ writer.write(format % (("ppem",) + tuple(ppems)))
+ writer.newline()
+ writer.newline()
+ for glyphName in glyphNames:
+ row = []
+ for ppem in ppems:
+ widths = self.hdmx[ppem]
+ row.append(widths[glyphName])
+ if ";" in glyphName:
+ glyphName = "\\x3b".join(glyphName.split(";"))
+ writer.write(format % ((glyphName,) + tuple(row)))
+ writer.newline()
+ writer.endtag("hdmxData")
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.begintag("hdmxData")
- writer.newline()
- ppems = sorted(self.hdmx.keys())
- records = []
- format = ""
- for ppem in ppems:
- widths = self.hdmx[ppem]
- records.append(widths)
- format = format + "%4d"
- glyphNames = ttFont.getGlyphOrder()[:]
- glyphNames.sort()
- maxNameLen = max(map(len, glyphNames))
- format = "%" + repr(maxNameLen) + 's:' + format + ' ;'
- writer.write(format % (("ppem",) + tuple(ppems)))
- writer.newline()
- writer.newline()
- for glyphName in glyphNames:
- row = []
- for ppem in ppems:
- widths = self.hdmx[ppem]
- row.append(widths[glyphName])
- if ";" in glyphName:
- glyphName = "\\x3b".join(glyphName.split(";"))
- writer.write(format % ((glyphName,) + tuple(row)))
- writer.newline()
- writer.endtag("hdmxData")
- writer.newline()
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "hdmxData":
+ return
+ content = strjoin(content)
+ lines = content.split(";")
+ topRow = lines[0].split()
+ assert topRow[0] == "ppem:", "illegal hdmx format"
+ ppems = list(map(int, topRow[1:]))
+ self.hdmx = hdmx = {}
+ for ppem in ppems:
+ hdmx[ppem] = {}
+ lines = (line.split() for line in lines[1:])
+ for line in lines:
+ if not line:
+ continue
+ assert line[0][-1] == ":", "illegal hdmx format"
+ glyphName = line[0][:-1]
+ if "\\" in glyphName:
+ from fontTools.misc.textTools import safeEval
- def fromXML(self, name, attrs, content, ttFont):
- if name != "hdmxData":
- return
- content = strjoin(content)
- lines = content.split(";")
- topRow = lines[0].split()
- assert topRow[0] == "ppem:", "illegal hdmx format"
- ppems = list(map(int, topRow[1:]))
- self.hdmx = hdmx = {}
- for ppem in ppems:
- hdmx[ppem] = {}
- lines = (line.split() for line in lines[1:])
- for line in lines:
- if not line:
- continue
- assert line[0][-1] == ":", "illegal hdmx format"
- glyphName = line[0][:-1]
- if "\\" in glyphName:
- from fontTools.misc.textTools import safeEval
- glyphName = safeEval('"""' + glyphName + '"""')
- line = list(map(int, line[1:]))
- assert len(line) == len(ppems), "illegal hdmx format"
- for i in range(len(ppems)):
- hdmx[ppems[i]][glyphName] = line[i]
+ glyphName = safeEval('"""' + glyphName + '"""')
+ line = list(map(int, line[1:]))
+ assert len(line) == len(ppems), "illegal hdmx format"
+ for i in range(len(ppems)):
+ hdmx[ppems[i]][glyphName] = line[i]
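
The compile() method above pads each device record (2 header bytes plus one width per glyph) to a multiple of four bytes. A quick worked example of that arithmetic with a made-up glyph count:

    numGlyphs = 11
    recordSize = 4 * ((2 + numGlyphs + 3) // 4)  # round 2 + numGlyphs up to a multiple of 4
    padLength = recordSize - 2 - numGlyphs
    print(recordSize, padLength)                 # 16 3
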
diff --git a/Lib/fontTools/ttLib/tables/_h_e_a_d.py b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
index 4d19da03..fe29c8fc 100644
--- a/Lib/fontTools/ttLib/tables/_h_e_a_d.py
+++ b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
@@ -1,8 +1,12 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
-from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
-from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
+from fontTools.misc.timeTools import (
+ timestampFromString,
+ timestampToString,
+ timestampNow,
+)
+from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging
@@ -31,87 +35,89 @@ headFormat = """
glyphDataFormat: h
"""
-class table__h_e_a_d(DefaultTable.DefaultTable):
- dependencies = ['maxp', 'loca', 'CFF ', 'CFF2']
+class table__h_e_a_d(DefaultTable.DefaultTable):
+ dependencies = ["maxp", "loca", "CFF ", "CFF2"]
- def decompile(self, data, ttFont):
- dummy, rest = sstruct.unpack2(headFormat, data, self)
- if rest:
- # this is quite illegal, but there seem to be fonts out there that do this
- log.warning("extra bytes at the end of 'head' table")
- assert rest == b"\0\0"
+ def decompile(self, data, ttFont):
+ dummy, rest = sstruct.unpack2(headFormat, data, self)
+ if rest:
+ # this is quite illegal, but there seem to be fonts out there that do this
+ log.warning("extra bytes at the end of 'head' table")
+ assert rest == b"\0\0"
- # For timestamp fields, ignore the top four bytes. Some fonts have
- # bogus values there. Since till 2038 those bytes only can be zero,
- # ignore them.
- #
- # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
- for stamp in 'created', 'modified':
- value = getattr(self, stamp)
- if value > 0xFFFFFFFF:
- log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
- value &= 0xFFFFFFFF
- setattr(self, stamp, value)
- if value < 0x7C259DC0: # January 1, 1970 00:00:00
- log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp)
- value += 0x7C259DC0
- setattr(self, stamp, value)
+ # For timestamp fields, ignore the top four bytes. Some fonts have
+ # bogus values there. Until 2038 those bytes can only be zero, so
+ # ignore them.
+ #
+ # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
+ for stamp in "created", "modified":
+ value = getattr(self, stamp)
+ if value > 0xFFFFFFFF:
+ log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
+ value &= 0xFFFFFFFF
+ setattr(self, stamp, value)
+ if value < 0x7C259DC0: # January 1, 1970 00:00:00
+ log.warning(
+ "'%s' timestamp seems very low; regarding as unix timestamp", stamp
+ )
+ value += 0x7C259DC0
+ setattr(self, stamp, value)
- def compile(self, ttFont):
- if ttFont.recalcBBoxes:
- # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
- if 'CFF ' in ttFont:
- topDict = ttFont['CFF '].cff.topDictIndex[0]
- self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
- elif 'CFF2' in ttFont:
- topDict = ttFont['CFF2'].cff.topDictIndex[0]
- charStrings = topDict.CharStrings
- fontBBox = None
- for charString in charStrings.values():
- bounds = charString.calcBounds(charStrings)
- if bounds is not None:
- if fontBBox is not None:
- fontBBox = unionRect(fontBBox, bounds)
- else:
- fontBBox = bounds
- if fontBBox is not None:
- self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
- if ttFont.recalcTimestamp:
- self.modified = timestampNow()
- data = sstruct.pack(headFormat, self)
- return data
+ def compile(self, ttFont):
+ if ttFont.recalcBBoxes:
+ # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
+ if "CFF " in ttFont:
+ topDict = ttFont["CFF "].cff.topDictIndex[0]
+ self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
+ elif "CFF2" in ttFont:
+ topDict = ttFont["CFF2"].cff.topDictIndex[0]
+ charStrings = topDict.CharStrings
+ fontBBox = None
+ for charString in charStrings.values():
+ bounds = charString.calcBounds(charStrings)
+ if bounds is not None:
+ if fontBBox is not None:
+ fontBBox = unionRect(fontBBox, bounds)
+ else:
+ fontBBox = bounds
+ if fontBBox is not None:
+ self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
+ if ttFont.recalcTimestamp:
+ self.modified = timestampNow()
+ data = sstruct.pack(headFormat, self)
+ return data
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- _, names, fixes = sstruct.getformat(headFormat)
- for name in names:
- value = getattr(self, name)
- if name in fixes:
- value = floatToFixedToStr(value, precisionBits=fixes[name])
- elif name in ("created", "modified"):
- value = timestampToString(value)
- elif name in ("magicNumber", "checkSumAdjustment"):
- if value < 0:
- value = value + 0x100000000
- value = hex(value)
- if value[-1:] == "L":
- value = value[:-1]
- elif name in ("macStyle", "flags"):
- value = num2binary(value, 16)
- writer.simpletag(name, value=value)
- writer.newline()
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ _, names, fixes = sstruct.getformat(headFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name in fixes:
+ value = floatToFixedToStr(value, precisionBits=fixes[name])
+ elif name in ("created", "modified"):
+ value = timestampToString(value)
+ elif name in ("magicNumber", "checkSumAdjustment"):
+ if value < 0:
+ value = value + 0x100000000
+ value = hex(value)
+ if value[-1:] == "L":
+ value = value[:-1]
+ elif name in ("macStyle", "flags"):
+ value = num2binary(value, 16)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- fixes = sstruct.getformat(headFormat)[2]
- if name in fixes:
- value = strToFixedToFloat(value, precisionBits=fixes[name])
- elif name in ("created", "modified"):
- value = timestampFromString(value)
- elif name in ("macStyle", "flags"):
- value = binary2num(value)
- else:
- value = safeEval(value)
- setattr(self, name, value)
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ fixes = sstruct.getformat(headFormat)[2]
+ if name in fixes:
+ value = strToFixedToFloat(value, precisionBits=fixes[name])
+ elif name in ("created", "modified"):
+ value = timestampFromString(value)
+ elif name in ("macStyle", "flags"):
+ value = binary2num(value)
+ else:
+ value = safeEval(value)
+ setattr(self, name, value)
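
The decompile() method above sanitizes the 'created'/'modified' timestamps in two steps. A standalone sketch of the same logic, using a made-up raw value:

    MAC_UNIX_EPOCH_DIFF = 0x7C259DC0   # seconds between 1904-01-01 and 1970-01-01

    value = 0x100000000 + 0x9ABCDEF0   # bogus high dword written by a broken tool (made-up)
    if value > 0xFFFFFFFF:
        value &= 0xFFFFFFFF            # drop the meaningless top four bytes
    if value < MAC_UNIX_EPOCH_DIFF:
        value += MAC_UNIX_EPOCH_DIFF   # a Unix timestamp was stored; shift it to the Mac epoch
    print(hex(value))                  # 0x9abcdef0
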
diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
index 9b8baaad..43e464f7 100644
--- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py
+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
@@ -1,7 +1,9 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
- ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
+ ensureVersionIsLong as fi2ve,
+ versionToFixed as ve2fi,
+)
from . import DefaultTable
import math
@@ -29,95 +31,105 @@ hheaFormat = """
class table__h_h_e_a(DefaultTable.DefaultTable):
-
- # Note: Keep in sync with table__v_h_e_a
-
- dependencies = ['hmtx', 'glyf', 'CFF ', 'CFF2']
-
- # OpenType spec renamed these, add aliases for compatibility
- @property
- def ascender(self): return self.ascent
-
- @ascender.setter
- def ascender(self,value): self.ascent = value
-
- @property
- def descender(self): return self.descent
-
- @descender.setter
- def descender(self,value): self.descent = value
-
- def decompile(self, data, ttFont):
- sstruct.unpack(hheaFormat, data, self)
-
- def compile(self, ttFont):
- if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ') or ttFont.isLoaded('CFF2')):
- self.recalc(ttFont)
- self.tableVersion = fi2ve(self.tableVersion)
- return sstruct.pack(hheaFormat, self)
-
- def recalc(self, ttFont):
- if 'hmtx' in ttFont:
- hmtxTable = ttFont['hmtx']
- self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())
-
- boundsWidthDict = {}
- if 'glyf' in ttFont:
- glyfTable = ttFont['glyf']
- for name in ttFont.getGlyphOrder():
- g = glyfTable[name]
- if g.numberOfContours == 0:
- continue
- if g.numberOfContours < 0 and not hasattr(g, "xMax"):
- # Composite glyph without extents set.
- # Calculate those.
- g.recalcBounds(glyfTable)
- boundsWidthDict[name] = g.xMax - g.xMin
- elif 'CFF ' in ttFont or 'CFF2' in ttFont:
- if 'CFF ' in ttFont:
- topDict = ttFont['CFF '].cff.topDictIndex[0]
- else:
- topDict = ttFont['CFF2'].cff.topDictIndex[0]
- charStrings = topDict.CharStrings
- for name in ttFont.getGlyphOrder():
- cs = charStrings[name]
- bounds = cs.calcBounds(charStrings)
- if bounds is not None:
- boundsWidthDict[name] = int(
- math.ceil(bounds[2]) - math.floor(bounds[0]))
-
- if boundsWidthDict:
- minLeftSideBearing = float('inf')
- minRightSideBearing = float('inf')
- xMaxExtent = -float('inf')
- for name, boundsWidth in boundsWidthDict.items():
- advanceWidth, lsb = hmtxTable[name]
- rsb = advanceWidth - lsb - boundsWidth
- extent = lsb + boundsWidth
- minLeftSideBearing = min(minLeftSideBearing, lsb)
- minRightSideBearing = min(minRightSideBearing, rsb)
- xMaxExtent = max(xMaxExtent, extent)
- self.minLeftSideBearing = minLeftSideBearing
- self.minRightSideBearing = minRightSideBearing
- self.xMaxExtent = xMaxExtent
-
- else: # No glyph has outlines.
- self.minLeftSideBearing = 0
- self.minRightSideBearing = 0
- self.xMaxExtent = 0
-
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(hheaFormat)
- for name in names:
- value = getattr(self, name)
- if name == "tableVersion":
- value = fi2ve(value)
- value = "0x%08x" % value
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableVersion":
- setattr(self, name, ve2fi(attrs["value"]))
- return
- setattr(self, name, safeEval(attrs["value"]))
+ # Note: Keep in sync with table__v_h_e_a
+
+ dependencies = ["hmtx", "glyf", "CFF ", "CFF2"]
+
+ # OpenType spec renamed these, add aliases for compatibility
+ @property
+ def ascender(self):
+ return self.ascent
+
+ @ascender.setter
+ def ascender(self, value):
+ self.ascent = value
+
+ @property
+ def descender(self):
+ return self.descent
+
+ @descender.setter
+ def descender(self, value):
+ self.descent = value
+
+ def decompile(self, data, ttFont):
+ sstruct.unpack(hheaFormat, data, self)
+
+ def compile(self, ttFont):
+ if ttFont.recalcBBoxes and (
+ ttFont.isLoaded("glyf")
+ or ttFont.isLoaded("CFF ")
+ or ttFont.isLoaded("CFF2")
+ ):
+ self.recalc(ttFont)
+ self.tableVersion = fi2ve(self.tableVersion)
+ return sstruct.pack(hheaFormat, self)
+
+ def recalc(self, ttFont):
+ if "hmtx" not in ttFont:
+ return
+
+ hmtxTable = ttFont["hmtx"]
+ self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())
+
+ boundsWidthDict = {}
+ if "glyf" in ttFont:
+ glyfTable = ttFont["glyf"]
+ for name in ttFont.getGlyphOrder():
+ g = glyfTable[name]
+ if g.numberOfContours == 0:
+ continue
+ if g.numberOfContours < 0 and not hasattr(g, "xMax"):
+ # Composite glyph without extents set.
+ # Calculate those.
+ g.recalcBounds(glyfTable)
+ boundsWidthDict[name] = g.xMax - g.xMin
+ elif "CFF " in ttFont or "CFF2" in ttFont:
+ if "CFF " in ttFont:
+ topDict = ttFont["CFF "].cff.topDictIndex[0]
+ else:
+ topDict = ttFont["CFF2"].cff.topDictIndex[0]
+ charStrings = topDict.CharStrings
+ for name in ttFont.getGlyphOrder():
+ cs = charStrings[name]
+ bounds = cs.calcBounds(charStrings)
+ if bounds is not None:
+ boundsWidthDict[name] = int(
+ math.ceil(bounds[2]) - math.floor(bounds[0])
+ )
+
+ if boundsWidthDict:
+ minLeftSideBearing = float("inf")
+ minRightSideBearing = float("inf")
+ xMaxExtent = -float("inf")
+ for name, boundsWidth in boundsWidthDict.items():
+ advanceWidth, lsb = hmtxTable[name]
+ rsb = advanceWidth - lsb - boundsWidth
+ extent = lsb + boundsWidth
+ minLeftSideBearing = min(minLeftSideBearing, lsb)
+ minRightSideBearing = min(minRightSideBearing, rsb)
+ xMaxExtent = max(xMaxExtent, extent)
+ self.minLeftSideBearing = minLeftSideBearing
+ self.minRightSideBearing = minRightSideBearing
+ self.xMaxExtent = xMaxExtent
+
+ else: # No glyph has outlines.
+ self.minLeftSideBearing = 0
+ self.minRightSideBearing = 0
+ self.xMaxExtent = 0
+
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(hheaFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name == "tableVersion":
+ value = fi2ve(value)
+ value = "0x%08x" % value
+ writer.simpletag(name, value=value)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableVersion":
+ setattr(self, name, ve2fi(attrs["value"]))
+ return
+ setattr(self, name, safeEval(attrs["value"]))
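
A brief usage sketch for the spec-named aliases and the recalc() hook above; "MyFont.ttf" is a placeholder path for any TrueType-flavored font:

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")          # placeholder file name
    hhea = font["hhea"]
    print(hhea.ascent == hhea.ascender)  # True; 'ascender' is the OpenType-spec alias
    hhea.recalc(font)                    # refresh advanceWidthMax and the side-bearing extremes
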
diff --git a/Lib/fontTools/ttLib/tables/_h_m_t_x.py b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
index 6980b8d8..2dbdd7f9 100644
--- a/Lib/fontTools/ttLib/tables/_h_m_t_x.py
+++ b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
@@ -12,127 +12,140 @@ log = logging.getLogger(__name__)
class table__h_m_t_x(DefaultTable.DefaultTable):
+ headerTag = "hhea"
+ advanceName = "width"
+ sideBearingName = "lsb"
+ numberOfMetricsName = "numberOfHMetrics"
+ longMetricFormat = "Hh"
- headerTag = 'hhea'
- advanceName = 'width'
- sideBearingName = 'lsb'
- numberOfMetricsName = 'numberOfHMetrics'
- longMetricFormat = 'Hh'
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ headerTable = ttFont.get(self.headerTag)
+ if headerTable is not None:
+ numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
+ else:
+ numberOfMetrics = numGlyphs
+ if numberOfMetrics > numGlyphs:
+ log.warning(
+ "The %s.%s exceeds the maxp.numGlyphs"
+ % (self.headerTag, self.numberOfMetricsName)
+ )
+ numberOfMetrics = numGlyphs
+ if len(data) < 4 * numberOfMetrics:
+ raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
+ # Note: advanceWidth is unsigned, but some font editors might
+ # read/write as signed. We can't be sure whether it was a mistake
+ # or not, so we read as unsigned but also issue a warning...
+ metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
+ metrics = struct.unpack(metricsFmt, data[: 4 * numberOfMetrics])
+ data = data[4 * numberOfMetrics :]
+ numberOfSideBearings = numGlyphs - numberOfMetrics
+ sideBearings = array.array("h", data[: 2 * numberOfSideBearings])
+ data = data[2 * numberOfSideBearings :]
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- headerTable = ttFont.get(self.headerTag)
- if headerTable is not None:
- numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
- else:
- numberOfMetrics = numGlyphs
- if numberOfMetrics > numGlyphs:
- log.warning("The %s.%s exceeds the maxp.numGlyphs" % (
- self.headerTag, self.numberOfMetricsName))
- numberOfMetrics = numGlyphs
- if len(data) < 4 * numberOfMetrics:
- raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
- # Note: advanceWidth is unsigned, but some font editors might
- # read/write as signed. We can't be sure whether it was a mistake
- # or not, so we read as unsigned but also issue a warning...
- metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
- metrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics])
- data = data[4 * numberOfMetrics:]
- numberOfSideBearings = numGlyphs - numberOfMetrics
- sideBearings = array.array("h", data[:2 * numberOfSideBearings])
- data = data[2 * numberOfSideBearings:]
+ if sys.byteorder != "big":
+ sideBearings.byteswap()
+ if data:
+ log.warning("too much '%s' table data" % self.tableTag)
+ self.metrics = {}
+ glyphOrder = ttFont.getGlyphOrder()
+ for i in range(numberOfMetrics):
+ glyphName = glyphOrder[i]
+ advanceWidth, lsb = metrics[i * 2 : i * 2 + 2]
+ if advanceWidth > 32767:
+ log.warning(
+ "Glyph %r has a huge advance %s (%d); is it intentional or "
+ "an (invalid) negative value?",
+ glyphName,
+ self.advanceName,
+ advanceWidth,
+ )
+ self.metrics[glyphName] = (advanceWidth, lsb)
+ lastAdvance = metrics[-2]
+ for i in range(numberOfSideBearings):
+ glyphName = glyphOrder[i + numberOfMetrics]
+ self.metrics[glyphName] = (lastAdvance, sideBearings[i])
- if sys.byteorder != "big": sideBearings.byteswap()
- if data:
- log.warning("too much '%s' table data" % self.tableTag)
- self.metrics = {}
- glyphOrder = ttFont.getGlyphOrder()
- for i in range(numberOfMetrics):
- glyphName = glyphOrder[i]
- advanceWidth, lsb = metrics[i*2:i*2+2]
- if advanceWidth > 32767:
- log.warning(
- "Glyph %r has a huge advance %s (%d); is it intentional or "
- "an (invalid) negative value?", glyphName, self.advanceName,
- advanceWidth)
- self.metrics[glyphName] = (advanceWidth, lsb)
- lastAdvance = metrics[-2]
- for i in range(numberOfSideBearings):
- glyphName = glyphOrder[i + numberOfMetrics]
- self.metrics[glyphName] = (lastAdvance, sideBearings[i])
+ def compile(self, ttFont):
+ metrics = []
+ hasNegativeAdvances = False
+ for glyphName in ttFont.getGlyphOrder():
+ advanceWidth, sideBearing = self.metrics[glyphName]
+ if advanceWidth < 0:
+ log.error(
+ "Glyph %r has negative advance %s" % (glyphName, self.advanceName)
+ )
+ hasNegativeAdvances = True
+ metrics.append([advanceWidth, sideBearing])
- def compile(self, ttFont):
- metrics = []
- hasNegativeAdvances = False
- for glyphName in ttFont.getGlyphOrder():
- advanceWidth, sideBearing = self.metrics[glyphName]
- if advanceWidth < 0:
- log.error("Glyph %r has negative advance %s" % (
- glyphName, self.advanceName))
- hasNegativeAdvances = True
- metrics.append([advanceWidth, sideBearing])
+ headerTable = ttFont.get(self.headerTag)
+ if headerTable is not None:
+ lastAdvance = metrics[-1][0]
+ lastIndex = len(metrics)
+ while metrics[lastIndex - 2][0] == lastAdvance:
+ lastIndex -= 1
+ if lastIndex <= 1:
+ # all advances are equal
+ lastIndex = 1
+ break
+ additionalMetrics = metrics[lastIndex:]
+ additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
+ metrics = metrics[:lastIndex]
+ numberOfMetrics = len(metrics)
+ setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
+ else:
+ # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
+ numberOfMetrics = ttFont["maxp"].numGlyphs
+ additionalMetrics = []
- headerTable = ttFont.get(self.headerTag)
- if headerTable is not None:
- lastAdvance = metrics[-1][0]
- lastIndex = len(metrics)
- while metrics[lastIndex-2][0] == lastAdvance:
- lastIndex -= 1
- if lastIndex <= 1:
- # all advances are equal
- lastIndex = 1
- break
- additionalMetrics = metrics[lastIndex:]
- additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
- metrics = metrics[:lastIndex]
- numberOfMetrics = len(metrics)
- setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
- else:
- # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
- numberOfMetrics = ttFont["maxp"].numGlyphs
- additionalMetrics = []
+ allMetrics = []
+ for advance, sb in metrics:
+ allMetrics.extend([otRound(advance), otRound(sb)])
+ metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
+ try:
+ data = struct.pack(metricsFmt, *allMetrics)
+ except struct.error as e:
+ if "out of range" in str(e) and hasNegativeAdvances:
+ raise ttLib.TTLibError(
+ "'%s' table can't contain negative advance %ss"
+ % (self.tableTag, self.advanceName)
+ )
+ else:
+ raise
+ additionalMetrics = array.array("h", additionalMetrics)
+ if sys.byteorder != "big":
+ additionalMetrics.byteswap()
+ data = data + additionalMetrics.tobytes()
+ return data
- allMetrics = []
- for advance, sb in metrics:
- allMetrics.extend([otRound(advance), otRound(sb)])
- metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
- try:
- data = struct.pack(metricsFmt, *allMetrics)
- except struct.error as e:
- if "out of range" in str(e) and hasNegativeAdvances:
- raise ttLib.TTLibError(
- "'%s' table can't contain negative advance %ss"
- % (self.tableTag, self.advanceName))
- else:
- raise
- additionalMetrics = array.array("h", additionalMetrics)
- if sys.byteorder != "big": additionalMetrics.byteswap()
- data = data + additionalMetrics.tobytes()
- return data
+ def toXML(self, writer, ttFont):
+ names = sorted(self.metrics.keys())
+ for glyphName in names:
+ advance, sb = self.metrics[glyphName]
+ writer.simpletag(
+ "mtx",
+ [
+ ("name", glyphName),
+ (self.advanceName, advance),
+ (self.sideBearingName, sb),
+ ],
+ )
+ writer.newline()
- def toXML(self, writer, ttFont):
- names = sorted(self.metrics.keys())
- for glyphName in names:
- advance, sb = self.metrics[glyphName]
- writer.simpletag("mtx", [
- ("name", glyphName),
- (self.advanceName, advance),
- (self.sideBearingName, sb),
- ])
- writer.newline()
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "metrics"):
+ self.metrics = {}
+ if name == "mtx":
+ self.metrics[attrs["name"]] = (
+ safeEval(attrs[self.advanceName]),
+ safeEval(attrs[self.sideBearingName]),
+ )
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "metrics"):
- self.metrics = {}
- if name == "mtx":
- self.metrics[attrs["name"]] = (safeEval(attrs[self.advanceName]),
- safeEval(attrs[self.sideBearingName]))
+ def __delitem__(self, glyphName):
+ del self.metrics[glyphName]
- def __delitem__(self, glyphName):
- del self.metrics[glyphName]
+ def __getitem__(self, glyphName):
+ return self.metrics[glyphName]
- def __getitem__(self, glyphName):
- return self.metrics[glyphName]
-
- def __setitem__(self, glyphName, advance_sb_pair):
- self.metrics[glyphName] = tuple(advance_sb_pair)
+ def __setitem__(self, glyphName, advance_sb_pair):
+ self.metrics[glyphName] = tuple(advance_sb_pair)
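
The mapping-style dunder methods above make the table usable like a dict of (advance, sideBearing) pairs. A usage sketch with a placeholder font path and glyph name:

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")      # placeholder file name
    hmtx = font["hmtx"]
    advance, lsb = hmtx["A"]         # __getitem__ returns the (advanceWidth, lsb) pair
    hmtx["A"] = (advance + 10, lsb)  # __setitem__ stores any two-value sequence as a tuple
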
diff --git a/Lib/fontTools/ttLib/tables/_k_e_r_n.py b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
index bcad2cea..8f55a311 100644
--- a/Lib/fontTools/ttLib/tables/_k_e_r_n.py
+++ b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
@@ -1,8 +1,6 @@
from fontTools.ttLib import getSearchRange
from fontTools.misc.textTools import safeEval, readHex
-from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi)
+from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
from . import DefaultTable
import struct
import sys
@@ -14,273 +12,267 @@ log = logging.getLogger(__name__)
class table__k_e_r_n(DefaultTable.DefaultTable):
-
- def getkern(self, format):
- for subtable in self.kernTables:
- if subtable.format == format:
- return subtable
- return None # not found
-
- def decompile(self, data, ttFont):
- version, nTables = struct.unpack(">HH", data[:4])
- apple = False
- if (len(data) >= 8) and (version == 1):
- # AAT Apple's "new" format. Hm.
- version, nTables = struct.unpack(">LL", data[:8])
- self.version = fi2fl(version, 16)
- data = data[8:]
- apple = True
- else:
- self.version = version
- data = data[4:]
- self.kernTables = []
- for i in range(nTables):
- if self.version == 1.0:
- # Apple
- length, coverage, subtableFormat = struct.unpack(
- ">LBB", data[:6])
- else:
- # in OpenType spec the "version" field refers to the common
- # subtable header; the actual subtable format is stored in
- # the 8-15 mask bits of "coverage" field.
- # This "version" is always 0 so we ignore it here
- _, length, subtableFormat, coverage = struct.unpack(
- ">HHBB", data[:6])
- if nTables == 1 and subtableFormat == 0:
- # The "length" value is ignored since some fonts
- # (like OpenSans and Calibri) have a subtable larger than
- # its value.
- nPairs, = struct.unpack(">H", data[6:8])
- calculated_length = (nPairs * 6) + 14
- if length != calculated_length:
- log.warning(
- "'kern' subtable longer than defined: "
- "%d bytes instead of %d bytes" %
- (calculated_length, length)
- )
- length = calculated_length
- if subtableFormat not in kern_classes:
- subtable = KernTable_format_unkown(subtableFormat)
- else:
- subtable = kern_classes[subtableFormat](apple)
- subtable.decompile(data[:length], ttFont)
- self.kernTables.append(subtable)
- data = data[length:]
-
- def compile(self, ttFont):
- if hasattr(self, "kernTables"):
- nTables = len(self.kernTables)
- else:
- nTables = 0
- if self.version == 1.0:
- # AAT Apple's "new" format.
- data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
- else:
- data = struct.pack(">HH", self.version, nTables)
- if hasattr(self, "kernTables"):
- for subtable in self.kernTables:
- data = data + subtable.compile(ttFont)
- return data
-
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- for subtable in self.kernTables:
- subtable.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version":
- self.version = safeEval(attrs["value"])
- return
- if name != "kernsubtable":
- return
- if not hasattr(self, "kernTables"):
- self.kernTables = []
- format = safeEval(attrs["format"])
- if format not in kern_classes:
- subtable = KernTable_format_unkown(format)
- else:
- apple = self.version == 1.0
- subtable = kern_classes[format](apple)
- self.kernTables.append(subtable)
- subtable.fromXML(name, attrs, content, ttFont)
+ def getkern(self, format):
+ for subtable in self.kernTables:
+ if subtable.format == format:
+ return subtable
+ return None # not found
+
+ def decompile(self, data, ttFont):
+ version, nTables = struct.unpack(">HH", data[:4])
+ apple = False
+ if (len(data) >= 8) and (version == 1):
+ # AAT Apple's "new" format. Hm.
+ version, nTables = struct.unpack(">LL", data[:8])
+ self.version = fi2fl(version, 16)
+ data = data[8:]
+ apple = True
+ else:
+ self.version = version
+ data = data[4:]
+ self.kernTables = []
+ for i in range(nTables):
+ if self.version == 1.0:
+ # Apple
+ length, coverage, subtableFormat = struct.unpack(">LBB", data[:6])
+ else:
+ # in OpenType spec the "version" field refers to the common
+ # subtable header; the actual subtable format is stored in
+ # the 8-15 mask bits of "coverage" field.
+ # This "version" is always 0 so we ignore it here
+ _, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
+ if nTables == 1 and subtableFormat == 0:
+ # The "length" value is ignored since some fonts
+ # (like OpenSans and Calibri) have a subtable larger than
+ # its value.
+ (nPairs,) = struct.unpack(">H", data[6:8])
+ calculated_length = (nPairs * 6) + 14
+ if length != calculated_length:
+ log.warning(
+ "'kern' subtable longer than defined: "
+ "%d bytes instead of %d bytes" % (calculated_length, length)
+ )
+ length = calculated_length
+ if subtableFormat not in kern_classes:
+ subtable = KernTable_format_unkown(subtableFormat)
+ else:
+ subtable = kern_classes[subtableFormat](apple)
+ subtable.decompile(data[:length], ttFont)
+ self.kernTables.append(subtable)
+ data = data[length:]
+
+ def compile(self, ttFont):
+ if hasattr(self, "kernTables"):
+ nTables = len(self.kernTables)
+ else:
+ nTables = 0
+ if self.version == 1.0:
+ # AAT Apple's "new" format.
+ data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
+ else:
+ data = struct.pack(">HH", self.version, nTables)
+ if hasattr(self, "kernTables"):
+ for subtable in self.kernTables:
+ data = data + subtable.compile(ttFont)
+ return data
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ for subtable in self.kernTables:
+ subtable.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ return
+ if name != "kernsubtable":
+ return
+ if not hasattr(self, "kernTables"):
+ self.kernTables = []
+ format = safeEval(attrs["format"])
+ if format not in kern_classes:
+ subtable = KernTable_format_unkown(format)
+ else:
+ apple = self.version == 1.0
+ subtable = kern_classes[format](apple)
+ self.kernTables.append(subtable)
+ subtable.fromXML(name, attrs, content, ttFont)
class KernTable_format_0(object):
-
- # 'version' is kept for backward compatibility
- version = format = 0
-
- def __init__(self, apple=False):
- self.apple = apple
-
- def decompile(self, data, ttFont):
- if not self.apple:
- version, length, subtableFormat, coverage = struct.unpack(
- ">HHBB", data[:6])
- if version != 0:
- from fontTools.ttLib import TTLibError
- raise TTLibError(
- "unsupported kern subtable version: %d" % version)
- tupleIndex = None
- # Should we also assert length == len(data)?
- data = data[6:]
- else:
- length, coverage, subtableFormat, tupleIndex = struct.unpack(
- ">LBBH", data[:8])
- data = data[8:]
- assert self.format == subtableFormat, "unsupported format"
- self.coverage = coverage
- self.tupleIndex = tupleIndex
-
- self.kernTable = kernTable = {}
-
- nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
- ">HHHH", data[:8])
- data = data[8:]
-
- datas = array.array("H", data[:6 * nPairs])
- if sys.byteorder != "big": datas.byteswap()
- it = iter(datas)
- glyphOrder = ttFont.getGlyphOrder()
- for k in range(nPairs):
- left, right, value = next(it), next(it), next(it)
- if value >= 32768:
- value -= 65536
- try:
- kernTable[(glyphOrder[left], glyphOrder[right])] = value
- except IndexError:
- # Slower, but will not throw an IndexError on an invalid
- # glyph id.
- kernTable[(
- ttFont.getGlyphName(left),
- ttFont.getGlyphName(right))] = value
- if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess
- log.warning(
- "excess data in 'kern' subtable: %d bytes",
- len(data) - 6 * nPairs)
-
- def compile(self, ttFont):
- nPairs = min(len(self.kernTable), 0xFFFF)
- searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
- searchRange &= 0xFFFF
- entrySelector = min(entrySelector, 0xFFFF)
- rangeShift = min(rangeShift, 0xFFFF)
- data = struct.pack(
- ">HHHH", nPairs, searchRange, entrySelector, rangeShift)
-
- # yeehee! (I mean, turn names into indices)
- try:
- reverseOrder = ttFont.getReverseGlyphMap()
- kernTable = sorted(
- (reverseOrder[left], reverseOrder[right], value)
- for ((left, right), value) in self.kernTable.items())
- except KeyError:
- # Slower, but will not throw KeyError on invalid glyph id.
- getGlyphID = ttFont.getGlyphID
- kernTable = sorted(
- (getGlyphID(left), getGlyphID(right), value)
- for ((left, right), value) in self.kernTable.items())
-
- for left, right, value in kernTable:
- data = data + struct.pack(">HHh", left, right, value)
-
- if not self.apple:
- version = 0
- length = len(data) + 6
- if length >= 0x10000:
- log.warning('"kern" subtable overflow, '
- 'truncating length value while preserving pairs.')
- length &= 0xFFFF
- header = struct.pack(
- ">HHBB", version, length, self.format, self.coverage)
- else:
- if self.tupleIndex is None:
- # sensible default when compiling a TTX from an old fonttools
- # or when inserting a Windows-style format 0 subtable into an
- # Apple version=1.0 kern table
- log.warning("'tupleIndex' is None; default to 0")
- self.tupleIndex = 0
- length = len(data) + 8
- header = struct.pack(
- ">LBBH", length, self.coverage, self.format, self.tupleIndex)
- return header + data
-
- def toXML(self, writer, ttFont):
- attrs = dict(coverage=self.coverage, format=self.format)
- if self.apple:
- if self.tupleIndex is None:
- log.warning("'tupleIndex' is None; default to 0")
- attrs["tupleIndex"] = 0
- else:
- attrs["tupleIndex"] = self.tupleIndex
- writer.begintag("kernsubtable", **attrs)
- writer.newline()
- items = sorted(self.kernTable.items())
- for (left, right), value in items:
- writer.simpletag("pair", [
- ("l", left),
- ("r", right),
- ("v", value)
- ])
- writer.newline()
- writer.endtag("kernsubtable")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.coverage = safeEval(attrs["coverage"])
- subtableFormat = safeEval(attrs["format"])
- if self.apple:
- if "tupleIndex" in attrs:
- self.tupleIndex = safeEval(attrs["tupleIndex"])
- else:
- # previous fontTools versions didn't export tupleIndex
- log.warning(
- "Apple kern subtable is missing 'tupleIndex' attribute")
- self.tupleIndex = None
- else:
- self.tupleIndex = None
- assert subtableFormat == self.format, "unsupported format"
- if not hasattr(self, "kernTable"):
- self.kernTable = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])
-
- def __getitem__(self, pair):
- return self.kernTable[pair]
-
- def __setitem__(self, pair, value):
- self.kernTable[pair] = value
-
- def __delitem__(self, pair):
- del self.kernTable[pair]
+ # 'version' is kept for backward compatibility
+ version = format = 0
+
+ def __init__(self, apple=False):
+ self.apple = apple
+
+ def decompile(self, data, ttFont):
+ if not self.apple:
+ version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
+ if version != 0:
+ from fontTools.ttLib import TTLibError
+
+ raise TTLibError("unsupported kern subtable version: %d" % version)
+ tupleIndex = None
+ # Should we also assert length == len(data)?
+ data = data[6:]
+ else:
+ length, coverage, subtableFormat, tupleIndex = struct.unpack(
+ ">LBBH", data[:8]
+ )
+ data = data[8:]
+ assert self.format == subtableFormat, "unsupported format"
+ self.coverage = coverage
+ self.tupleIndex = tupleIndex
+
+ self.kernTable = kernTable = {}
+
+ nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
+ ">HHHH", data[:8]
+ )
+ data = data[8:]
+
+ datas = array.array("H", data[: 6 * nPairs])
+ if sys.byteorder != "big":
+ datas.byteswap()
+ it = iter(datas)
+ glyphOrder = ttFont.getGlyphOrder()
+ for k in range(nPairs):
+ left, right, value = next(it), next(it), next(it)
+ if value >= 32768:
+ value -= 65536
+ try:
+ kernTable[(glyphOrder[left], glyphOrder[right])] = value
+ except IndexError:
+ # Slower, but will not throw an IndexError on an invalid
+ # glyph id.
+ kernTable[
+ (ttFont.getGlyphName(left), ttFont.getGlyphName(right))
+ ] = value
+ if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess
+ log.warning(
+ "excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs
+ )
+
+ def compile(self, ttFont):
+ nPairs = min(len(self.kernTable), 0xFFFF)
+ searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
+ searchRange &= 0xFFFF
+ entrySelector = min(entrySelector, 0xFFFF)
+ rangeShift = min(rangeShift, 0xFFFF)
+ data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)
+
+ # yeehee! (I mean, turn names into indices)
+ try:
+ reverseOrder = ttFont.getReverseGlyphMap()
+ kernTable = sorted(
+ (reverseOrder[left], reverseOrder[right], value)
+ for ((left, right), value) in self.kernTable.items()
+ )
+ except KeyError:
+ # Slower, but will not throw KeyError on invalid glyph id.
+ getGlyphID = ttFont.getGlyphID
+ kernTable = sorted(
+ (getGlyphID(left), getGlyphID(right), value)
+ for ((left, right), value) in self.kernTable.items()
+ )
+
+ for left, right, value in kernTable:
+ data = data + struct.pack(">HHh", left, right, value)
+
+ if not self.apple:
+ version = 0
+ length = len(data) + 6
+ if length >= 0x10000:
+ log.warning(
+ '"kern" subtable overflow, '
+ "truncating length value while preserving pairs."
+ )
+ length &= 0xFFFF
+ header = struct.pack(">HHBB", version, length, self.format, self.coverage)
+ else:
+ if self.tupleIndex is None:
+ # sensible default when compiling a TTX from an old fonttools
+ # or when inserting a Windows-style format 0 subtable into an
+ # Apple version=1.0 kern table
+ log.warning("'tupleIndex' is None; default to 0")
+ self.tupleIndex = 0
+ length = len(data) + 8
+ header = struct.pack(
+ ">LBBH", length, self.coverage, self.format, self.tupleIndex
+ )
+ return header + data
+
+ def toXML(self, writer, ttFont):
+ attrs = dict(coverage=self.coverage, format=self.format)
+ if self.apple:
+ if self.tupleIndex is None:
+ log.warning("'tupleIndex' is None; default to 0")
+ attrs["tupleIndex"] = 0
+ else:
+ attrs["tupleIndex"] = self.tupleIndex
+ writer.begintag("kernsubtable", **attrs)
+ writer.newline()
+ items = sorted(self.kernTable.items())
+ for (left, right), value in items:
+ writer.simpletag("pair", [("l", left), ("r", right), ("v", value)])
+ writer.newline()
+ writer.endtag("kernsubtable")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.coverage = safeEval(attrs["coverage"])
+ subtableFormat = safeEval(attrs["format"])
+ if self.apple:
+ if "tupleIndex" in attrs:
+ self.tupleIndex = safeEval(attrs["tupleIndex"])
+ else:
+ # previous fontTools versions didn't export tupleIndex
+ log.warning("Apple kern subtable is missing 'tupleIndex' attribute")
+ self.tupleIndex = None
+ else:
+ self.tupleIndex = None
+ assert subtableFormat == self.format, "unsupported format"
+ if not hasattr(self, "kernTable"):
+ self.kernTable = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])
+
+ def __getitem__(self, pair):
+ return self.kernTable[pair]
+
+ def __setitem__(self, pair, value):
+ self.kernTable[pair] = value
+
+ def __delitem__(self, pair):
+ del self.kernTable[pair]
class KernTable_format_unkown(object):
-
- def __init__(self, format):
- self.format = format
-
- def decompile(self, data, ttFont):
- self.data = data
-
- def compile(self, ttFont):
- return self.data
-
- def toXML(self, writer, ttFont):
- writer.begintag("kernsubtable", format=self.format)
- writer.newline()
- writer.comment("unknown 'kern' subtable format")
- writer.newline()
- writer.dumphex(self.data)
- writer.endtag("kernsubtable")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.decompile(readHex(content), ttFont)
+ def __init__(self, format):
+ self.format = format
+
+ def decompile(self, data, ttFont):
+ self.data = data
+
+ def compile(self, ttFont):
+ return self.data
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("kernsubtable", format=self.format)
+ writer.newline()
+ writer.comment("unknown 'kern' subtable format")
+ writer.newline()
+ writer.dumphex(self.data)
+ writer.endtag("kernsubtable")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.decompile(readHex(content), ttFont)
kern_classes = {0: KernTable_format_0}
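Usage note (not part of the diff): the reformatted 'kern' table keeps its subtables in self.kernTables, and getkern(0) returns the first format-0 subtable, whose pairs are exposed through the dict-style __getitem__/__setitem__ shown above. A minimal sketch, assuming a font file "MyFont.ttf" with a 'kern' table containing the pair ("A", "V"):

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")
    subtable = font["kern"].getkern(0)         # first format-0 subtable, or None
    if subtable is not None and ("A", "V") in subtable.kernTable:
        value = subtable[("A", "V")]           # read the pair value
        subtable[("A", "V")] = value - 10      # tighten the pair by 10 units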
diff --git a/Lib/fontTools/ttLib/tables/_l_c_a_r.py b/Lib/fontTools/ttLib/tables/_l_c_a_r.py
index e63310ef..1323b670 100644
--- a/Lib/fontTools/ttLib/tables/_l_c_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_l_c_a_r.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table__l_c_a_r(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/_l_o_c_a.py b/Lib/fontTools/ttLib/tables/_l_o_c_a.py
index 6a8693ed..5884cef4 100644
--- a/Lib/fontTools/ttLib/tables/_l_o_c_a.py
+++ b/Lib/fontTools/ttLib/tables/_l_o_c_a.py
@@ -8,54 +8,58 @@ log = logging.getLogger(__name__)
class table__l_o_c_a(DefaultTable.DefaultTable):
+ dependencies = ["glyf"]
- dependencies = ['glyf']
-
- def decompile(self, data, ttFont):
- longFormat = ttFont['head'].indexToLocFormat
- if longFormat:
- format = "I"
- else:
- format = "H"
- locations = array.array(format)
- locations.frombytes(data)
- if sys.byteorder != "big": locations.byteswap()
- if not longFormat:
- l = array.array("I")
- for i in range(len(locations)):
- l.append(locations[i] * 2)
- locations = l
- if len(locations) < (ttFont['maxp'].numGlyphs + 1):
- log.warning("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
- len(locations) - 1, ttFont['maxp'].numGlyphs)
- self.locations = locations
-
- def compile(self, ttFont):
- try:
- max_location = max(self.locations)
- except AttributeError:
- self.set([])
- max_location = 0
- if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations):
- locations = array.array("H")
- for i in range(len(self.locations)):
- locations.append(self.locations[i] // 2)
- ttFont['head'].indexToLocFormat = 0
- else:
- locations = array.array("I", self.locations)
- ttFont['head'].indexToLocFormat = 1
- if sys.byteorder != "big": locations.byteswap()
- return locations.tobytes()
-
- def set(self, locations):
- self.locations = array.array("I", locations)
-
- def toXML(self, writer, ttFont):
- writer.comment("The 'loca' table will be calculated by the compiler")
- writer.newline()
-
- def __getitem__(self, index):
- return self.locations[index]
-
- def __len__(self):
- return len(self.locations)
+ def decompile(self, data, ttFont):
+ longFormat = ttFont["head"].indexToLocFormat
+ if longFormat:
+ format = "I"
+ else:
+ format = "H"
+ locations = array.array(format)
+ locations.frombytes(data)
+ if sys.byteorder != "big":
+ locations.byteswap()
+ if not longFormat:
+ l = array.array("I")
+ for i in range(len(locations)):
+ l.append(locations[i] * 2)
+ locations = l
+ if len(locations) < (ttFont["maxp"].numGlyphs + 1):
+ log.warning(
+ "corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
+ len(locations) - 1,
+ ttFont["maxp"].numGlyphs,
+ )
+ self.locations = locations
+
+ def compile(self, ttFont):
+ try:
+ max_location = max(self.locations)
+ except AttributeError:
+ self.set([])
+ max_location = 0
+ if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations):
+ locations = array.array("H")
+ for i in range(len(self.locations)):
+ locations.append(self.locations[i] // 2)
+ ttFont["head"].indexToLocFormat = 0
+ else:
+ locations = array.array("I", self.locations)
+ ttFont["head"].indexToLocFormat = 1
+ if sys.byteorder != "big":
+ locations.byteswap()
+ return locations.tobytes()
+
+ def set(self, locations):
+ self.locations = array.array("I", locations)
+
+ def toXML(self, writer, ttFont):
+ writer.comment("The 'loca' table will be calculated by the compiler")
+ writer.newline()
+
+ def __getitem__(self, index):
+ return self.locations[index]
+
+ def __len__(self):
+ return len(self.locations)
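Side note (not part of the diff): compile() above stores 'loca' in the short format only when every offset is even and the largest offset, once halved, still fits in a uint16. A standalone sketch of that decision rule (choose_loca_format is a hypothetical helper, not fontTools API), for a non-empty offset list:

    def choose_loca_format(offsets):
        # Mirrors table__l_o_c_a.compile(): short storage keeps offset // 2
        # and implies head.indexToLocFormat = 0; otherwise the raw uint32
        # offsets are kept and indexToLocFormat = 1.
        if max(offsets) < 0x20000 and all(o % 2 == 0 for o in offsets):
            return 0, [o // 2 for o in offsets]
        return 1, list(offsets)

    print(choose_loca_format([0, 100, 131070]))  # (0, [0, 50, 65535])
    print(choose_loca_format([0, 100, 131071]))  # (1, [0, 100, 131071])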
diff --git a/Lib/fontTools/ttLib/tables/_l_t_a_g.py b/Lib/fontTools/ttLib/tables/_l_t_a_g.py
index ce3c6b97..24f5e131 100644
--- a/Lib/fontTools/ttLib/tables/_l_t_a_g.py
+++ b/Lib/fontTools/ttLib/tables/_l_t_a_g.py
@@ -4,60 +4,61 @@ import struct
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html
+
class table__l_t_a_g(DefaultTable.DefaultTable):
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.version, self.flags = 1, 0
- self.tags = []
-
- def addTag(self, tag):
- """Add 'tag' to the list of langauge tags if not already there.
-
- Returns the integer index of 'tag' in the list of all tags.
- """
- try:
- return self.tags.index(tag)
- except ValueError:
- self.tags.append(tag)
- return len(self.tags) - 1
-
- def decompile(self, data, ttFont):
- self.version, self.flags, numTags = struct.unpack(">LLL", data[:12])
- assert self.version == 1
- self.tags = []
- for i in range(numTags):
- pos = 12 + i * 4
- offset, length = struct.unpack(">HH", data[pos:pos+4])
- tag = data[offset:offset+length].decode("ascii")
- self.tags.append(tag)
-
- def compile(self, ttFont):
- dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
- stringPool = ""
- for tag in self.tags:
- offset = stringPool.find(tag)
- if offset < 0:
- offset = len(stringPool)
- stringPool = stringPool + tag
- offset = offset + 12 + len(self.tags) * 4
- dataList.append(struct.pack(">HH", offset, len(tag)))
- dataList.append(tobytes(stringPool))
- return bytesjoin(dataList)
-
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.simpletag("flags", value=self.flags)
- writer.newline()
- for tag in self.tags:
- writer.simpletag("LanguageTag", tag=tag)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "tags"):
- self.tags = []
- if name == "LanguageTag":
- self.tags.append(attrs["tag"])
- elif "value" in attrs:
- value = safeEval(attrs["value"])
- setattr(self, name, value)
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.version, self.flags = 1, 0
+ self.tags = []
+
+ def addTag(self, tag):
+ """Add 'tag' to the list of langauge tags if not already there.
+
+ Returns the integer index of 'tag' in the list of all tags.
+ """
+ try:
+ return self.tags.index(tag)
+ except ValueError:
+ self.tags.append(tag)
+ return len(self.tags) - 1
+
+ def decompile(self, data, ttFont):
+ self.version, self.flags, numTags = struct.unpack(">LLL", data[:12])
+ assert self.version == 1
+ self.tags = []
+ for i in range(numTags):
+ pos = 12 + i * 4
+ offset, length = struct.unpack(">HH", data[pos : pos + 4])
+ tag = data[offset : offset + length].decode("ascii")
+ self.tags.append(tag)
+
+ def compile(self, ttFont):
+ dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
+ stringPool = ""
+ for tag in self.tags:
+ offset = stringPool.find(tag)
+ if offset < 0:
+ offset = len(stringPool)
+ stringPool = stringPool + tag
+ offset = offset + 12 + len(self.tags) * 4
+ dataList.append(struct.pack(">HH", offset, len(tag)))
+ dataList.append(tobytes(stringPool))
+ return bytesjoin(dataList)
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("flags", value=self.flags)
+ writer.newline()
+ for tag in self.tags:
+ writer.simpletag("LanguageTag", tag=tag)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "tags"):
+ self.tags = []
+ if name == "LanguageTag":
+ self.tags.append(attrs["tag"])
+ elif "value" in attrs:
+ value = safeEval(attrs["value"])
+ setattr(self, name, value)
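Usage note (not part of the diff): addTag() deduplicates language tags and returns the index that a platform-0 name record's langID can point at. A minimal sketch via fontTools' newTable factory, with example tags:

    from fontTools.ttLib import newTable

    ltag = newTable("ltag")      # __init__ above: version=1, flags=0, tags=[]
    print(ltag.addTag("en"))     # 0 (appended)
    print(ltag.addTag("de-CH"))  # 1 (appended)
    print(ltag.addTag("en"))     # 0 (already present, index reused)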
diff --git a/Lib/fontTools/ttLib/tables/_m_a_x_p.py b/Lib/fontTools/ttLib/tables/_m_a_x_p.py
index e810806d..f0e6c33a 100644
--- a/Lib/fontTools/ttLib/tables/_m_a_x_p.py
+++ b/Lib/fontTools/ttLib/tables/_m_a_x_p.py
@@ -27,112 +27,113 @@ maxpFormat_1_0_add = """
class table__m_a_x_p(DefaultTable.DefaultTable):
+ dependencies = ["glyf"]
- dependencies = ['glyf']
+ def decompile(self, data, ttFont):
+ dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
+ self.numGlyphs = int(self.numGlyphs)
+ if self.tableVersion != 0x00005000:
+ dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
+ assert len(data) == 0
- def decompile(self, data, ttFont):
- dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
- self.numGlyphs = int(self.numGlyphs)
- if self.tableVersion != 0x00005000:
- dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
- assert len(data) == 0
+ def compile(self, ttFont):
+ if "glyf" in ttFont:
+ if ttFont.isLoaded("glyf") and ttFont.recalcBBoxes:
+ self.recalc(ttFont)
+ else:
+ pass # CFF
+ self.numGlyphs = len(ttFont.getGlyphOrder())
+ if self.tableVersion != 0x00005000:
+ self.tableVersion = 0x00010000
+ data = sstruct.pack(maxpFormat_0_5, self)
+ if self.tableVersion == 0x00010000:
+ data = data + sstruct.pack(maxpFormat_1_0_add, self)
+ return data
- def compile(self, ttFont):
- if 'glyf' in ttFont:
- if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
- self.recalc(ttFont)
- else:
- pass # CFF
- self.numGlyphs = len(ttFont.getGlyphOrder())
- if self.tableVersion != 0x00005000:
- self.tableVersion = 0x00010000
- data = sstruct.pack(maxpFormat_0_5, self)
- if self.tableVersion == 0x00010000:
- data = data + sstruct.pack(maxpFormat_1_0_add, self)
- return data
+ def recalc(self, ttFont):
+ """Recalculate the font bounding box, and most other maxp values except
+ for the TT instructions values. Also recalculate the value of bit 1
+ of the flags field and the font bounding box of the 'head' table.
+ """
+ glyfTable = ttFont["glyf"]
+ hmtxTable = ttFont["hmtx"]
+ headTable = ttFont["head"]
+ self.numGlyphs = len(glyfTable)
+ INFINITY = 100000
+ xMin = +INFINITY
+ yMin = +INFINITY
+ xMax = -INFINITY
+ yMax = -INFINITY
+ maxPoints = 0
+ maxContours = 0
+ maxCompositePoints = 0
+ maxCompositeContours = 0
+ maxComponentElements = 0
+ maxComponentDepth = 0
+ allXMinIsLsb = 1
+ for glyphName in ttFont.getGlyphOrder():
+ g = glyfTable[glyphName]
+ if g.numberOfContours:
+ if hmtxTable[glyphName][1] != g.xMin:
+ allXMinIsLsb = 0
+ xMin = min(xMin, g.xMin)
+ yMin = min(yMin, g.yMin)
+ xMax = max(xMax, g.xMax)
+ yMax = max(yMax, g.yMax)
+ if g.numberOfContours > 0:
+ nPoints, nContours = g.getMaxpValues()
+ maxPoints = max(maxPoints, nPoints)
+ maxContours = max(maxContours, nContours)
+ elif g.isComposite():
+ nPoints, nContours, componentDepth = g.getCompositeMaxpValues(
+ glyfTable
+ )
+ maxCompositePoints = max(maxCompositePoints, nPoints)
+ maxCompositeContours = max(maxCompositeContours, nContours)
+ maxComponentElements = max(maxComponentElements, len(g.components))
+ maxComponentDepth = max(maxComponentDepth, componentDepth)
+ if xMin == +INFINITY:
+ headTable.xMin = 0
+ headTable.yMin = 0
+ headTable.xMax = 0
+ headTable.yMax = 0
+ else:
+ headTable.xMin = xMin
+ headTable.yMin = yMin
+ headTable.xMax = xMax
+ headTable.yMax = yMax
+ self.maxPoints = maxPoints
+ self.maxContours = maxContours
+ self.maxCompositePoints = maxCompositePoints
+ self.maxCompositeContours = maxCompositeContours
+ self.maxComponentElements = maxComponentElements
+ self.maxComponentDepth = maxComponentDepth
+ if allXMinIsLsb:
+ headTable.flags = headTable.flags | 0x2
+ else:
+ headTable.flags = headTable.flags & ~0x2
- def recalc(self, ttFont):
- """Recalculate the font bounding box, and most other maxp values except
- for the TT instructions values. Also recalculate the value of bit 1
- of the flags field and the font bounding box of the 'head' table.
- """
- glyfTable = ttFont['glyf']
- hmtxTable = ttFont['hmtx']
- headTable = ttFont['head']
- self.numGlyphs = len(glyfTable)
- INFINITY = 100000
- xMin = +INFINITY
- yMin = +INFINITY
- xMax = -INFINITY
- yMax = -INFINITY
- maxPoints = 0
- maxContours = 0
- maxCompositePoints = 0
- maxCompositeContours = 0
- maxComponentElements = 0
- maxComponentDepth = 0
- allXMinIsLsb = 1
- for glyphName in ttFont.getGlyphOrder():
- g = glyfTable[glyphName]
- if g.numberOfContours:
- if hmtxTable[glyphName][1] != g.xMin:
- allXMinIsLsb = 0
- xMin = min(xMin, g.xMin)
- yMin = min(yMin, g.yMin)
- xMax = max(xMax, g.xMax)
- yMax = max(yMax, g.yMax)
- if g.numberOfContours > 0:
- nPoints, nContours = g.getMaxpValues()
- maxPoints = max(maxPoints, nPoints)
- maxContours = max(maxContours, nContours)
- else:
- nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable)
- maxCompositePoints = max(maxCompositePoints, nPoints)
- maxCompositeContours = max(maxCompositeContours, nContours)
- maxComponentElements = max(maxComponentElements, len(g.components))
- maxComponentDepth = max(maxComponentDepth, componentDepth)
- if xMin == +INFINITY:
- headTable.xMin = 0
- headTable.yMin = 0
- headTable.xMax = 0
- headTable.yMax = 0
- else:
- headTable.xMin = xMin
- headTable.yMin = yMin
- headTable.xMax = xMax
- headTable.yMax = yMax
- self.maxPoints = maxPoints
- self.maxContours = maxContours
- self.maxCompositePoints = maxCompositePoints
- self.maxCompositeContours = maxCompositeContours
- self.maxComponentElements = maxComponentElements
- self.maxComponentDepth = maxComponentDepth
- if allXMinIsLsb:
- headTable.flags = headTable.flags | 0x2
- else:
- headTable.flags = headTable.flags & ~0x2
+ def testrepr(self):
+ items = sorted(self.__dict__.items())
+ print(". . . . . . . . .")
+ for combo in items:
+ print(" %s: %s" % combo)
+ print(". . . . . . . . .")
- def testrepr(self):
- items = sorted(self.__dict__.items())
- print(". . . . . . . . .")
- for combo in items:
- print(" %s: %s" % combo)
- print(". . . . . . . . .")
+ def toXML(self, writer, ttFont):
+ if self.tableVersion != 0x00005000:
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
+ if self.tableVersion != 0x00005000:
+ formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
+ names = names + names_1_0
+ for name in names:
+ value = getattr(self, name)
+ if name == "tableVersion":
+ value = hex(value)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def toXML(self, writer, ttFont):
- if self.tableVersion != 0x00005000:
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
- if self.tableVersion != 0x00005000:
- formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
- names = names + names_1_0
- for name in names:
- value = getattr(self, name)
- if name == "tableVersion":
- value = hex(value)
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- setattr(self, name, safeEval(attrs["value"]))
+ def fromXML(self, name, attrs, content, ttFont):
+ setattr(self, name, safeEval(attrs["value"]))
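Side note (not part of the diff): recalc() above only runs when the font is compiled with recalcBBoxes enabled and the 'glyf' table is loaded. A minimal sketch, assuming "In.ttf"/"Out.ttf" are placeholder paths to a TrueType (glyf-flavoured) font:

    from fontTools.ttLib import TTFont

    font = TTFont("In.ttf", recalcBBoxes=True)  # True is also the default
    _ = font["glyf"]                            # ensure 'glyf' is loaded
    font.save("Out.ttf")                        # maxp.compile() calls recalc()
    print(font["maxp"].maxPoints, font["head"].xMin, font["head"].yMax)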
diff --git a/Lib/fontTools/ttLib/tables/_m_e_t_a.py b/Lib/fontTools/ttLib/tables/_m_e_t_a.py
index 3faf0a56..3af9e543 100644
--- a/Lib/fontTools/ttLib/tables/_m_e_t_a.py
+++ b/Lib/fontTools/ttLib/tables/_m_e_t_a.py
@@ -30,16 +30,15 @@ class table__m_e_t_a(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
headerSize = sstruct.calcsize(META_HEADER_FORMAT)
- header = sstruct.unpack(META_HEADER_FORMAT, data[0 : headerSize])
+ header = sstruct.unpack(META_HEADER_FORMAT, data[0:headerSize])
if header["version"] != 1:
- raise TTLibError("unsupported 'meta' version %d" %
- header["version"])
+ raise TTLibError("unsupported 'meta' version %d" % header["version"])
dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT)
for i in range(header["numDataMaps"]):
dataMapOffset = headerSize + i * dataMapSize
dataMap = sstruct.unpack(
- DATA_MAP_FORMAT,
- data[dataMapOffset : dataMapOffset + dataMapSize])
+ DATA_MAP_FORMAT, data[dataMapOffset : dataMapOffset + dataMapSize]
+ )
tag = dataMap["tag"]
offset = dataMap["dataOffset"]
self.data[tag] = data[offset : offset + dataMap["dataLength"]]
@@ -50,12 +49,15 @@ class table__m_e_t_a(DefaultTable.DefaultTable):
keys = sorted(self.data.keys())
headerSize = sstruct.calcsize(META_HEADER_FORMAT)
dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT)
- header = sstruct.pack(META_HEADER_FORMAT, {
+ header = sstruct.pack(
+ META_HEADER_FORMAT,
+ {
"version": 1,
"flags": 0,
"dataOffset": dataOffset,
- "numDataMaps": len(keys)
- })
+ "numDataMaps": len(keys),
+ },
+ )
dataMaps = []
dataBlocks = []
for tag in keys:
@@ -63,11 +65,12 @@ class table__m_e_t_a(DefaultTable.DefaultTable):
data = self.data[tag].encode("utf-8")
else:
data = self.data[tag]
- dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, {
- "tag": tag,
- "dataOffset": dataOffset,
- "dataLength": len(data)
- }))
+ dataMaps.append(
+ sstruct.pack(
+ DATA_MAP_FORMAT,
+ {"tag": tag, "dataOffset": dataOffset, "dataLength": len(data)},
+ )
+ )
dataBlocks.append(data)
dataOffset += len(data)
return bytesjoin([header] + dataMaps + dataBlocks)
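Usage note (not part of the diff): compile() above serializes whatever sits in the table's data dict; per the 'meta' handling in this module, the text tags 'dlng' and 'slng' are kept as str and encoded to UTF-8 at compile time, while other tags stay raw bytes. A minimal sketch with placeholder paths and example language values:

    from fontTools.ttLib import TTFont, newTable

    font = TTFont("In.ttf")
    if "meta" not in font:
        font["meta"] = newTable("meta")          # starts with an empty data dict
    font["meta"].data["dlng"] = "Latn"           # design languages (text)
    font["meta"].data["slng"] = "Latn, Cyrl"     # supported languages (text)
    font.save("Out.ttf")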
diff --git a/Lib/fontTools/ttLib/tables/_n_a_m_e.py b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
index 9558addb..bbb4f536 100644
--- a/Lib/fontTools/ttLib/tables/_n_a_m_e.py
+++ b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
@@ -1,8 +1,20 @@
# -*- coding: utf-8 -*-
from fontTools.misc import sstruct
-from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin, tobytes, tostr, safeEval
+from fontTools.misc.textTools import (
+ bytechr,
+ byteord,
+ bytesjoin,
+ strjoin,
+ tobytes,
+ tostr,
+ safeEval,
+)
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import newTable
+from fontTools.ttLib.ttVisitor import TTVisitor
+from fontTools import ttLib
+import fontTools.ttLib.tables.otTables as otTables
+from fontTools.ttLib.tables import C_P_A_L_
from . import DefaultTable
import struct
import logging
@@ -24,573 +36,643 @@ nameRecordSize = sstruct.calcsize(nameRecordFormat)
class table__n_a_m_e(DefaultTable.DefaultTable):
- dependencies = ["ltag"]
-
- def decompile(self, data, ttFont):
- format, n, stringOffset = struct.unpack(b">HHH", data[:6])
- expectedStringOffset = 6 + n * nameRecordSize
- if stringOffset != expectedStringOffset:
- log.error(
- "'name' table stringOffset incorrect. Expected: %s; Actual: %s",
- expectedStringOffset, stringOffset)
- stringData = data[stringOffset:]
- data = data[6:]
- self.names = []
- for i in range(n):
- if len(data) < 12:
- log.error('skipping malformed name record #%d', i)
- continue
- name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord())
- name.string = stringData[name.offset:name.offset+name.length]
- if name.offset + name.length > len(stringData):
- log.error('skipping malformed name record #%d', i)
- continue
- assert len(name.string) == name.length
- #if (name.platEncID, name.platformID) in ((0, 0), (1, 3)):
- # if len(name.string) % 2:
- # print "2-byte string doesn't have even length!"
- # print name.__dict__
- del name.offset, name.length
- self.names.append(name)
-
- def compile(self, ttFont):
- if not hasattr(self, "names"):
- # only happens when there are NO name table entries read
- # from the TTX file
- self.names = []
- names = self.names
- names.sort() # sort according to the spec; see NameRecord.__lt__()
- stringData = b""
- format = 0
- n = len(names)
- stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat)
- data = struct.pack(b">HHH", format, n, stringOffset)
- lastoffset = 0
- done = {} # remember the data so we can reuse the "pointers"
- for name in names:
- string = name.toBytes()
- if string in done:
- name.offset, name.length = done[string]
- else:
- name.offset, name.length = done[string] = len(stringData), len(string)
- stringData = bytesjoin([stringData, string])
- data = data + sstruct.pack(nameRecordFormat, name)
- return data + stringData
-
- def toXML(self, writer, ttFont):
- for name in self.names:
- name.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name != "namerecord":
- return # ignore unknown tags
- if not hasattr(self, "names"):
- self.names = []
- name = NameRecord()
- self.names.append(name)
- name.fromXML(name, attrs, content, ttFont)
-
- def getName(self, nameID, platformID, platEncID, langID=None):
- for namerecord in self.names:
- if ( namerecord.nameID == nameID and
- namerecord.platformID == platformID and
- namerecord.platEncID == platEncID):
- if langID is None or namerecord.langID == langID:
- return namerecord
- return None # not found
-
- def getDebugName(self, nameID):
- englishName = someName = None
- for name in self.names:
- if name.nameID != nameID:
- continue
- try:
- unistr = name.toUnicode()
- except UnicodeDecodeError:
- continue
-
- someName = unistr
- if (name.platformID, name.langID) in ((1, 0), (3, 0x409)):
- englishName = unistr
- break
- if englishName:
- return englishName
- elif someName:
- return someName
- else:
- return None
-
- def getFirstDebugName(self, nameIDs):
- for nameID in nameIDs:
- name = self.getDebugName(nameID)
- if name is not None:
- return name
- return None
-
- def getBestFamilyName(self):
- # 21 = WWS Family Name
- # 16 = Typographic Family Name
- # 1 = Family Name
- return self.getFirstDebugName((21, 16, 1))
-
- def getBestSubFamilyName(self):
- # 22 = WWS SubFamily Name
- # 17 = Typographic SubFamily Name
- # 2 = SubFamily Name
- return self.getFirstDebugName((22, 17, 2))
-
- def getBestFullName(self):
- # 4 = Full Name
- # 6 = PostScript Name
- for nameIDs in ((21, 22), (16, 17), (1, 2), (4, ), (6, )):
- if len(nameIDs) == 2:
- name_fam = self.getDebugName(nameIDs[0])
- name_subfam = self.getDebugName(nameIDs[1])
- if None in [name_fam, name_subfam]:
- continue # if any is None, skip
- name = f"{name_fam} {name_subfam}"
- if name_subfam.lower() == 'regular':
- name = f"{name_fam}"
- return name
- else:
- name = self.getDebugName(nameIDs[0])
- if name is not None:
- return name
- return None
-
- def setName(self, string, nameID, platformID, platEncID, langID):
- """ Set the 'string' for the name record identified by 'nameID', 'platformID',
- 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it
- and append to the name table.
-
- 'string' can be of type `str` (`unicode` in PY2) or `bytes`. In the latter case,
- it is assumed to be already encoded with the correct plaform-specific encoding
- identified by the (platformID, platEncID, langID) triplet. A warning is issued
- to prevent unexpected results.
- """
- if not hasattr(self, 'names'):
- self.names = []
- if not isinstance(string, str):
- if isinstance(string, bytes):
- log.warning(
- "name string is bytes, ensure it's correctly encoded: %r", string)
- else:
- raise TypeError(
- "expected unicode or bytes, found %s: %r" % (
- type(string).__name__, string))
- namerecord = self.getName(nameID, platformID, platEncID, langID)
- if namerecord:
- namerecord.string = string
- else:
- self.names.append(makeName(string, nameID, platformID, platEncID, langID))
-
- def removeNames(self, nameID=None, platformID=None, platEncID=None, langID=None):
- """Remove any name records identified by the given combination of 'nameID',
- 'platformID', 'platEncID' and 'langID'.
- """
- args = {
- argName: argValue
- for argName, argValue in (
- ("nameID", nameID),
- ("platformID", platformID),
- ("platEncID", platEncID),
- ("langID", langID),
- )
- if argValue is not None
- }
- if not args:
- # no arguments, nothing to do
- return
- self.names = [
- rec for rec in self.names
- if any(
- argValue != getattr(rec, argName)
- for argName, argValue in args.items()
- )
- ]
-
- def _findUnusedNameID(self, minNameID=256):
- """Finds an unused name id.
-
- The nameID is assigned in the range between 'minNameID' and 32767 (inclusive),
- following the last nameID in the name table.
- """
- names = getattr(self, 'names', [])
- nameID = 1 + max([n.nameID for n in names] + [minNameID - 1])
- if nameID > 32767:
- raise ValueError("nameID must be less than 32768")
- return nameID
-
- def findMultilingualName(self, names, windows=True, mac=True, minNameID=0):
- """Return the name ID of an existing multilingual name that
- matches the 'names' dictionary, or None if not found.
-
- 'names' is a dictionary with the name in multiple languages,
- such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
- The keys can be arbitrary IETF BCP 47 language codes;
- the values are Unicode strings.
-
- If 'windows' is True, the returned name ID is guaranteed
- exist for all requested languages for platformID=3 and
- platEncID=1.
- If 'mac' is True, the returned name ID is guaranteed to exist
- for all requested languages for platformID=1 and platEncID=0.
-
- The returned name ID will not be less than the 'minNameID'
- argument.
- """
- # Gather the set of requested
- # (string, platformID, platEncID, langID)
- # tuples
- reqNameSet = set()
- for lang, name in sorted(names.items()):
- if windows:
- windowsName = _makeWindowsName(name, None, lang)
- if windowsName is not None:
- reqNameSet.add((windowsName.string,
- windowsName.platformID,
- windowsName.platEncID,
- windowsName.langID))
- if mac:
- macName = _makeMacName(name, None, lang)
- if macName is not None:
- reqNameSet.add((macName.string,
- macName.platformID,
- macName.platEncID,
- macName.langID))
-
- # Collect matching name IDs
- matchingNames = dict()
- for name in self.names:
- try:
- key = (name.toUnicode(), name.platformID,
- name.platEncID, name.langID)
- except UnicodeDecodeError:
- continue
- if key in reqNameSet and name.nameID >= minNameID:
- nameSet = matchingNames.setdefault(name.nameID, set())
- nameSet.add(key)
-
- # Return the first name ID that defines all requested strings
- for nameID, nameSet in sorted(matchingNames.items()):
- if nameSet == reqNameSet:
- return nameID
-
- return None # not found
-
- def addMultilingualName(self, names, ttFont=None, nameID=None,
- windows=True, mac=True, minNameID=0):
- """Add a multilingual name, returning its name ID
-
- 'names' is a dictionary with the name in multiple languages,
- such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
- The keys can be arbitrary IETF BCP 47 language codes;
- the values are Unicode strings.
-
- 'ttFont' is the TTFont to which the names are added, or None.
- If present, the font's 'ltag' table can get populated
- to store exotic language codes, which allows encoding
- names that otherwise cannot get encoded at all.
-
- 'nameID' is the name ID to be used, or None to let the library
- find an existing set of name records that match, or pick an
- unused name ID.
-
- If 'windows' is True, a platformID=3 name record will be added.
- If 'mac' is True, a platformID=1 name record will be added.
-
- If the 'nameID' argument is None, the created nameID will not
- be less than the 'minNameID' argument.
- """
- if not hasattr(self, 'names'):
- self.names = []
- if nameID is None:
- # Reuse nameID if possible
- nameID = self.findMultilingualName(
- names, windows=windows, mac=mac, minNameID=minNameID)
- if nameID is not None:
- return nameID
- nameID = self._findUnusedNameID()
- # TODO: Should minimize BCP 47 language codes.
- # https://github.com/fonttools/fonttools/issues/930
- for lang, name in sorted(names.items()):
- if windows:
- windowsName = _makeWindowsName(name, nameID, lang)
- if windowsName is not None:
- self.names.append(windowsName)
- else:
- # We cannot not make a Windows name: make sure we add a
- # Mac name as a fallback. This can happen for exotic
- # BCP47 language tags that have no Windows language code.
- mac = True
- if mac:
- macName = _makeMacName(name, nameID, lang, ttFont)
- if macName is not None:
- self.names.append(macName)
- return nameID
-
- def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255):
- """ Add a new name record containing 'string' for each (platformID, platEncID,
- langID) tuple specified in the 'platforms' list.
-
- The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive),
- following the last nameID in the name table.
- If no 'platforms' are specified, two English name records are added, one for the
- Macintosh (platformID=0), and one for the Windows platform (3).
-
- The 'string' must be a Unicode string, so it can be encoded with different,
- platform-specific encodings.
-
- Return the new nameID.
- """
- assert len(platforms) > 0, \
- "'platforms' must contain at least one (platformID, platEncID, langID) tuple"
- if not hasattr(self, 'names'):
- self.names = []
- if not isinstance(string, str):
- raise TypeError(
- "expected str, found %s: %r" % (type(string).__name__, string))
- nameID = self._findUnusedNameID(minNameID + 1)
- for platformID, platEncID, langID in platforms:
- self.names.append(makeName(string, nameID, platformID, platEncID, langID))
- return nameID
+ dependencies = ["ltag"]
+
+ def decompile(self, data, ttFont):
+ format, n, stringOffset = struct.unpack(b">HHH", data[:6])
+ expectedStringOffset = 6 + n * nameRecordSize
+ if stringOffset != expectedStringOffset:
+ log.error(
+ "'name' table stringOffset incorrect. Expected: %s; Actual: %s",
+ expectedStringOffset,
+ stringOffset,
+ )
+ stringData = data[stringOffset:]
+ data = data[6:]
+ self.names = []
+ for i in range(n):
+ if len(data) < 12:
+ log.error("skipping malformed name record #%d", i)
+ continue
+ name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord())
+ name.string = stringData[name.offset : name.offset + name.length]
+ if name.offset + name.length > len(stringData):
+ log.error("skipping malformed name record #%d", i)
+ continue
+ assert len(name.string) == name.length
+ # if (name.platEncID, name.platformID) in ((0, 0), (1, 3)):
+ # if len(name.string) % 2:
+ # print "2-byte string doesn't have even length!"
+ # print name.__dict__
+ del name.offset, name.length
+ self.names.append(name)
+
+ def compile(self, ttFont):
+ if not hasattr(self, "names"):
+ # only happens when there are NO name table entries read
+ # from the TTX file
+ self.names = []
+ names = self.names
+ names.sort() # sort according to the spec; see NameRecord.__lt__()
+ stringData = b""
+ format = 0
+ n = len(names)
+ stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat)
+ data = struct.pack(b">HHH", format, n, stringOffset)
+ lastoffset = 0
+ done = {} # remember the data so we can reuse the "pointers"
+ for name in names:
+ string = name.toBytes()
+ if string in done:
+ name.offset, name.length = done[string]
+ else:
+ name.offset, name.length = done[string] = len(stringData), len(string)
+ stringData = bytesjoin([stringData, string])
+ data = data + sstruct.pack(nameRecordFormat, name)
+ return data + stringData
+
+ def toXML(self, writer, ttFont):
+ for name in self.names:
+ name.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "namerecord":
+ return # ignore unknown tags
+ if not hasattr(self, "names"):
+ self.names = []
+ name = NameRecord()
+ self.names.append(name)
+ name.fromXML(name, attrs, content, ttFont)
+
+ def getName(self, nameID, platformID, platEncID, langID=None):
+ for namerecord in self.names:
+ if (
+ namerecord.nameID == nameID
+ and namerecord.platformID == platformID
+ and namerecord.platEncID == platEncID
+ ):
+ if langID is None or namerecord.langID == langID:
+ return namerecord
+ return None # not found
+
+ def getDebugName(self, nameID):
+ englishName = someName = None
+ for name in self.names:
+ if name.nameID != nameID:
+ continue
+ try:
+ unistr = name.toUnicode()
+ except UnicodeDecodeError:
+ continue
+
+ someName = unistr
+ if (name.platformID, name.langID) in ((1, 0), (3, 0x409)):
+ englishName = unistr
+ break
+ if englishName:
+ return englishName
+ elif someName:
+ return someName
+ else:
+ return None
+
+ def getFirstDebugName(self, nameIDs):
+ for nameID in nameIDs:
+ name = self.getDebugName(nameID)
+ if name is not None:
+ return name
+ return None
+
+ def getBestFamilyName(self):
+ # 21 = WWS Family Name
+ # 16 = Typographic Family Name
+ # 1 = Family Name
+ return self.getFirstDebugName((21, 16, 1))
+
+ def getBestSubFamilyName(self):
+ # 22 = WWS SubFamily Name
+ # 17 = Typographic SubFamily Name
+ # 2 = SubFamily Name
+ return self.getFirstDebugName((22, 17, 2))
+
+ def getBestFullName(self):
+ # 4 = Full Name
+ # 6 = PostScript Name
+ for nameIDs in ((21, 22), (16, 17), (1, 2), (4,), (6,)):
+ if len(nameIDs) == 2:
+ name_fam = self.getDebugName(nameIDs[0])
+ name_subfam = self.getDebugName(nameIDs[1])
+ if None in [name_fam, name_subfam]:
+ continue # if any is None, skip
+ name = f"{name_fam} {name_subfam}"
+ if name_subfam.lower() == "regular":
+ name = f"{name_fam}"
+ return name
+ else:
+ name = self.getDebugName(nameIDs[0])
+ if name is not None:
+ return name
+ return None
+
+ def setName(self, string, nameID, platformID, platEncID, langID):
+ """Set the 'string' for the name record identified by 'nameID', 'platformID',
+ 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it
+ and append to the name table.
+
+ 'string' can be of type `str` (`unicode` in PY2) or `bytes`. In the latter case,
+ it is assumed to be already encoded with the correct platform-specific encoding
+ identified by the (platformID, platEncID, langID) triplet. A warning is issued
+ to prevent unexpected results.
+ """
+ if not hasattr(self, "names"):
+ self.names = []
+ if not isinstance(string, str):
+ if isinstance(string, bytes):
+ log.warning(
+ "name string is bytes, ensure it's correctly encoded: %r", string
+ )
+ else:
+ raise TypeError(
+ "expected unicode or bytes, found %s: %r"
+ % (type(string).__name__, string)
+ )
+ namerecord = self.getName(nameID, platformID, platEncID, langID)
+ if namerecord:
+ namerecord.string = string
+ else:
+ self.names.append(makeName(string, nameID, platformID, platEncID, langID))
+
+ def removeNames(self, nameID=None, platformID=None, platEncID=None, langID=None):
+ """Remove any name records identified by the given combination of 'nameID',
+ 'platformID', 'platEncID' and 'langID'.
+ """
+ args = {
+ argName: argValue
+ for argName, argValue in (
+ ("nameID", nameID),
+ ("platformID", platformID),
+ ("platEncID", platEncID),
+ ("langID", langID),
+ )
+ if argValue is not None
+ }
+ if not args:
+ # no arguments, nothing to do
+ return
+ self.names = [
+ rec
+ for rec in self.names
+ if any(
+ argValue != getattr(rec, argName) for argName, argValue in args.items()
+ )
+ ]
+
+ @staticmethod
+ def removeUnusedNames(ttFont):
+ """Remove any name records which are not in NameID range 0-255 and not utilized
+ within the font itself."""
+ visitor = NameRecordVisitor()
+ visitor.visit(ttFont)
+ toDelete = set()
+ for record in ttFont["name"].names:
+ # Name IDs 26 to 255, inclusive, are reserved for future standard names.
+ # https://learn.microsoft.com/en-us/typography/opentype/spec/name#name-ids
+ if record.nameID < 256:
+ continue
+ if record.nameID not in visitor.seen:
+ toDelete.add(record.nameID)
+
+ for nameID in toDelete:
+ ttFont["name"].removeNames(nameID)
+ return toDelete
+
+ def _findUnusedNameID(self, minNameID=256):
+ """Finds an unused name id.
+
+ The nameID is assigned in the range between 'minNameID' and 32767 (inclusive),
+ following the last nameID in the name table.
+ """
+ names = getattr(self, "names", [])
+ nameID = 1 + max([n.nameID for n in names] + [minNameID - 1])
+ if nameID > 32767:
+ raise ValueError("nameID must be less than 32768")
+ return nameID
+
+ def findMultilingualName(
+ self, names, windows=True, mac=True, minNameID=0, ttFont=None
+ ):
+ """Return the name ID of an existing multilingual name that
+ matches the 'names' dictionary, or None if not found.
+
+ 'names' is a dictionary with the name in multiple languages,
+ such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
+ The keys can be arbitrary IETF BCP 47 language codes;
+ the values are Unicode strings.
+
+ If 'windows' is True, the returned name ID is guaranteed to
+ exist for all requested languages for platformID=3 and
+ platEncID=1.
+ If 'mac' is True, the returned name ID is guaranteed to exist
+ for all requested languages for platformID=1 and platEncID=0.
+
+ The returned name ID will not be less than the 'minNameID'
+ argument.
+ """
+ # Gather the set of requested
+ # (string, platformID, platEncID, langID)
+ # tuples
+ reqNameSet = set()
+ for lang, name in sorted(names.items()):
+ if windows:
+ windowsName = _makeWindowsName(name, None, lang)
+ if windowsName is not None:
+ reqNameSet.add(
+ (
+ windowsName.string,
+ windowsName.platformID,
+ windowsName.platEncID,
+ windowsName.langID,
+ )
+ )
+ if mac:
+ macName = _makeMacName(name, None, lang, ttFont)
+ if macName is not None:
+ reqNameSet.add(
+ (
+ macName.string,
+ macName.platformID,
+ macName.platEncID,
+ macName.langID,
+ )
+ )
+
+ # Collect matching name IDs
+ matchingNames = dict()
+ for name in self.names:
+ try:
+ key = (name.toUnicode(), name.platformID, name.platEncID, name.langID)
+ except UnicodeDecodeError:
+ continue
+ if key in reqNameSet and name.nameID >= minNameID:
+ nameSet = matchingNames.setdefault(name.nameID, set())
+ nameSet.add(key)
+
+ # Return the first name ID that defines all requested strings
+ for nameID, nameSet in sorted(matchingNames.items()):
+ if nameSet == reqNameSet:
+ return nameID
+
+ return None # not found
+
+ def addMultilingualName(
+ self, names, ttFont=None, nameID=None, windows=True, mac=True, minNameID=0
+ ):
+ """Add a multilingual name, returning its name ID
+
+ 'names' is a dictionary with the name in multiple languages,
+ such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
+ The keys can be arbitrary IETF BCP 47 language codes;
+ the values are Unicode strings.
+
+ 'ttFont' is the TTFont to which the names are added, or None.
+ If present, the font's 'ltag' table can get populated
+ to store exotic language codes, which allows encoding
+ names that otherwise cannot get encoded at all.
+
+ 'nameID' is the name ID to be used, or None to let the library
+ find an existing set of name records that match, or pick an
+ unused name ID.
+
+ If 'windows' is True, a platformID=3 name record will be added.
+ If 'mac' is True, a platformID=1 name record will be added.
+
+ If the 'nameID' argument is None, the created nameID will not
+ be less than the 'minNameID' argument.
+ """
+ if not hasattr(self, "names"):
+ self.names = []
+ if nameID is None:
+ # Reuse nameID if possible
+ nameID = self.findMultilingualName(
+ names, windows=windows, mac=mac, minNameID=minNameID, ttFont=ttFont
+ )
+ if nameID is not None:
+ return nameID
+ nameID = self._findUnusedNameID()
+ # TODO: Should minimize BCP 47 language codes.
+ # https://github.com/fonttools/fonttools/issues/930
+ for lang, name in sorted(names.items()):
+ if windows:
+ windowsName = _makeWindowsName(name, nameID, lang)
+ if windowsName is not None:
+ self.names.append(windowsName)
+ else:
+ # We cannot make a Windows name: make sure we add a
+ # Mac name as a fallback. This can happen for exotic
+ # BCP47 language tags that have no Windows language code.
+ mac = True
+ if mac:
+ macName = _makeMacName(name, nameID, lang, ttFont)
+ if macName is not None:
+ self.names.append(macName)
+ return nameID
+
+ def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255):
+ """Add a new name record containing 'string' for each (platformID, platEncID,
+ langID) tuple specified in the 'platforms' list.
+
+ The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive),
+ following the last nameID in the name table.
+ If no 'platforms' are specified, two English name records are added, one for the
+ Macintosh platform (platformID=1), and one for the Windows platform (platformID=3).
+
+ The 'string' must be a Unicode string, so it can be encoded with different,
+ platform-specific encodings.
+
+ Return the new nameID.
+ """
+ assert (
+ len(platforms) > 0
+ ), "'platforms' must contain at least one (platformID, platEncID, langID) tuple"
+ if not hasattr(self, "names"):
+ self.names = []
+ if not isinstance(string, str):
+ raise TypeError(
+ "expected str, found %s: %r" % (type(string).__name__, string)
+ )
+ nameID = self._findUnusedNameID(minNameID + 1)
+ for platformID, platEncID, langID in platforms:
+ self.names.append(makeName(string, nameID, platformID, platEncID, langID))
+ return nameID
def makeName(string, nameID, platformID, platEncID, langID):
- name = NameRecord()
- name.string, name.nameID, name.platformID, name.platEncID, name.langID = (
- string, nameID, platformID, platEncID, langID)
- return name
+ name = NameRecord()
+ name.string, name.nameID, name.platformID, name.platEncID, name.langID = (
+ string,
+ nameID,
+ platformID,
+ platEncID,
+ langID,
+ )
+ return name
def _makeWindowsName(name, nameID, language):
- """Create a NameRecord for the Microsoft Windows platform
-
- 'language' is an arbitrary IETF BCP 47 language identifier such
- as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows
- does not support the desired language, the result will be None.
- Future versions of fonttools might return a NameRecord for the
- OpenType 'name' table format 1, but this is not implemented yet.
- """
- langID = _WINDOWS_LANGUAGE_CODES.get(language.lower())
- if langID is not None:
- return makeName(name, nameID, 3, 1, langID)
- else:
- log.warning("cannot add Windows name in language %s "
- "because fonttools does not yet support "
- "name table format 1" % language)
- return None
+ """Create a NameRecord for the Microsoft Windows platform
+
+ 'language' is an arbitrary IETF BCP 47 language identifier such
+ as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows
+ does not support the desired language, the result will be None.
+ Future versions of fonttools might return a NameRecord for the
+ OpenType 'name' table format 1, but this is not implemented yet.
+ """
+ langID = _WINDOWS_LANGUAGE_CODES.get(language.lower())
+ if langID is not None:
+ return makeName(name, nameID, 3, 1, langID)
+ else:
+ log.warning(
+ "cannot add Windows name in language %s "
+ "because fonttools does not yet support "
+ "name table format 1" % language
+ )
+ return None
def _makeMacName(name, nameID, language, font=None):
- """Create a NameRecord for Apple platforms
-
- 'language' is an arbitrary IETF BCP 47 language identifier such
- as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we
- create a Macintosh NameRecord that is understood by old applications
- (platform ID 1 and an old-style Macintosh language enum). If this
- is not possible, we create a Unicode NameRecord (platform ID 0)
- whose language points to the font’s 'ltag' table. The latter
- can encode any string in any language, but legacy applications
- might not recognize the format (in which case they will ignore
- those names).
-
- 'font' should be the TTFont for which you want to create a name.
- If 'font' is None, we only return NameRecords for legacy Macintosh;
- in that case, the result will be None for names that need to
- be encoded with an 'ltag' table.
-
- See the section “The language identifier” in Apple’s specification:
- https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
- """
- macLang = _MAC_LANGUAGE_CODES.get(language.lower())
- macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang)
- if macLang is not None and macScript is not None:
- encoding = getEncoding(1, macScript, macLang, default="ascii")
- # Check if we can actually encode this name. If we can't,
- # for example because we have no support for the legacy
- # encoding, or because the name string contains Unicode
- # characters that the legacy encoding cannot represent,
- # we fall back to encoding the name in Unicode and put
- # the language tag into the ltag table.
- try:
- _ = tobytes(name, encoding, errors="strict")
- return makeName(name, nameID, 1, macScript, macLang)
- except UnicodeEncodeError:
- pass
- if font is not None:
- ltag = font.tables.get("ltag")
- if ltag is None:
- ltag = font["ltag"] = newTable("ltag")
- # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)”
- # “The preferred platform-specific code for Unicode would be 3 or 4.”
- # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
- return makeName(name, nameID, 0, 4, ltag.addTag(language))
- else:
- log.warning("cannot store language %s into 'ltag' table "
- "without having access to the TTFont object" %
- language)
- return None
+ """Create a NameRecord for Apple platforms
+
+ 'language' is an arbitrary IETF BCP 47 language identifier such
+ as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we
+ create a Macintosh NameRecord that is understood by old applications
+ (platform ID 1 and an old-style Macintosh language enum). If this
+ is not possible, we create a Unicode NameRecord (platform ID 0)
+ whose language points to the font’s 'ltag' table. The latter
+ can encode any string in any language, but legacy applications
+ might not recognize the format (in which case they will ignore
+ those names).
+
+ 'font' should be the TTFont for which you want to create a name.
+ If 'font' is None, we only return NameRecords for legacy Macintosh;
+ in that case, the result will be None for names that need to
+ be encoded with an 'ltag' table.
+
+ See the section “The language identifier” in Apple’s specification:
+ https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
+ """
+ macLang = _MAC_LANGUAGE_CODES.get(language.lower())
+ macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang)
+ if macLang is not None and macScript is not None:
+ encoding = getEncoding(1, macScript, macLang, default="ascii")
+ # Check if we can actually encode this name. If we can't,
+ # for example because we have no support for the legacy
+ # encoding, or because the name string contains Unicode
+ # characters that the legacy encoding cannot represent,
+ # we fall back to encoding the name in Unicode and put
+ # the language tag into the ltag table.
+ try:
+ _ = tobytes(name, encoding, errors="strict")
+ return makeName(name, nameID, 1, macScript, macLang)
+ except UnicodeEncodeError:
+ pass
+ if font is not None:
+ ltag = font.tables.get("ltag")
+ if ltag is None:
+ ltag = font["ltag"] = newTable("ltag")
+ # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)”
+ # “The preferred platform-specific code for Unicode would be 3 or 4.”
+ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
+ return makeName(name, nameID, 0, 4, ltag.addTag(language))
+ else:
+ log.warning(
+ "cannot store language %s into 'ltag' table "
+ "without having access to the TTFont object" % language
+ )
+ return None
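As a rough sketch of the behaviour documented above (the helper's name is not visible in this hunk; it is assumed to be the module-level _makeMacName(name, nameID, language, font=None) of fontTools.ttLib.tables._n_a_m_e), a language with a legacy Macintosh code yields a platform 1 record even without a font, while a language without such a code needs a TTFont to hold the 'ltag' table and otherwise returns None:

    from fontTools.ttLib.tables import _n_a_m_e

    # 'de' has an old-style Macintosh language enum (2), so we get a
    # platform 1 (Macintosh) record even with font=None.
    rec = _n_a_m_e._makeMacName("Beispiel", 4, "de")
    print(rec.platformID, rec.langID)                 # 1 2

    # 'de-AT-1901' has no Macintosh enum; without a TTFont to receive the
    # 'ltag' table the helper logs a warning and returns None.
    print(_n_a_m_e._makeMacName("Beispiel", 4, "de-AT-1901"))   # None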
class NameRecord(object):
-
- def getEncoding(self, default='ascii'):
- """Returns the Python encoding name for this name entry based on its platformID,
- platEncID, and langID. If encoding for these values is not known, by default
- 'ascii' is returned. That can be overriden by passing a value to the default
- argument.
- """
- return getEncoding(self.platformID, self.platEncID, self.langID, default)
-
- def encodingIsUnicodeCompatible(self):
- return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1']
-
- def __str__(self):
- return self.toStr(errors='backslashreplace')
-
- def isUnicode(self):
- return (self.platformID == 0 or
- (self.platformID == 3 and self.platEncID in [0, 1, 10]))
-
- def toUnicode(self, errors='strict'):
- """
- If self.string is a Unicode string, return it; otherwise try decoding the
- bytes in self.string to a Unicode string using the encoding of this
- entry as returned by self.getEncoding(); Note that self.getEncoding()
- returns 'ascii' if the encoding is unknown to the library.
-
- Certain heuristics are performed to recover data from bytes that are
- ill-formed in the chosen encoding, or that otherwise look misencoded
- (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE
- but marked otherwise). If the bytes are ill-formed and the heuristics fail,
- the error is handled according to the errors parameter to this function, which is
- passed to the underlying decode() function; by default it throws a
- UnicodeDecodeError exception.
-
- Note: The mentioned heuristics mean that roundtripping a font to XML and back
- to binary might recover some misencoded data whereas just loading the font
- and saving it back will not change them.
- """
- def isascii(b):
- return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D]
- encoding = self.getEncoding()
- string = self.string
-
- if isinstance(string, bytes) and encoding == 'utf_16_be' and len(string) % 2 == 1:
- # Recover badly encoded UTF-16 strings that have an odd number of bytes:
- # - If the last byte is zero, drop it. Otherwise,
- # - If all the odd bytes are zero and all the even bytes are ASCII,
- # prepend one zero byte. Otherwise,
- # - If first byte is zero and all other bytes are ASCII, insert zero
- # bytes between consecutive ASCII bytes.
- #
- # (Yes, I've seen all of these in the wild... sigh)
- if byteord(string[-1]) == 0:
- string = string[:-1]
- elif all(byteord(b) == 0 if i % 2 else isascii(byteord(b)) for i,b in enumerate(string)):
- string = b'\0' + string
- elif byteord(string[0]) == 0 and all(isascii(byteord(b)) for b in string[1:]):
- string = bytesjoin(b'\0'+bytechr(byteord(b)) for b in string[1:])
-
- string = tostr(string, encoding=encoding, errors=errors)
-
- # If decoded strings still looks like UTF-16BE, it suggests a double-encoding.
- # Fix it up.
- if all(ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i,c in enumerate(string)):
- # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text,
- # narrow it down.
- string = ''.join(c for c in string[1::2])
-
- return string
-
- def toBytes(self, errors='strict'):
- """ If self.string is a bytes object, return it; otherwise try encoding
- the Unicode string in self.string to bytes using the encoding of this
- entry as returned by self.getEncoding(); Note that self.getEncoding()
- returns 'ascii' if the encoding is unknown to the library.
-
- If the Unicode string cannot be encoded to bytes in the chosen encoding,
- the error is handled according to the errors parameter to this function,
- which is passed to the underlying encode() function; by default it throws a
- UnicodeEncodeError exception.
- """
- return tobytes(self.string, encoding=self.getEncoding(), errors=errors)
-
- toStr = toUnicode
-
- def toXML(self, writer, ttFont):
- try:
- unistr = self.toUnicode()
- except UnicodeDecodeError:
- unistr = None
- attrs = [
- ("nameID", self.nameID),
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ("langID", hex(self.langID)),
- ]
-
- if unistr is None or not self.encodingIsUnicodeCompatible():
- attrs.append(("unicode", unistr is not None))
-
- writer.begintag("namerecord", attrs)
- writer.newline()
- if unistr is not None:
- writer.write(unistr)
- else:
- writer.write8bit(self.string)
- writer.newline()
- writer.endtag("namerecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.nameID = safeEval(attrs["nameID"])
- self.platformID = safeEval(attrs["platformID"])
- self.platEncID = safeEval(attrs["platEncID"])
- self.langID = safeEval(attrs["langID"])
- s = strjoin(content).strip()
- encoding = self.getEncoding()
- if self.encodingIsUnicodeCompatible() or safeEval(attrs.get("unicode", "False")):
- self.string = s.encode(encoding)
- else:
- # This is the inverse of write8bit...
- self.string = s.encode("latin1")
-
- def __lt__(self, other):
- if type(self) != type(other):
- return NotImplemented
-
- try:
- # implemented so that list.sort() sorts according to the spec.
- selfTuple = (
- self.platformID,
- self.platEncID,
- self.langID,
- self.nameID,
- self.toBytes(),
- )
- otherTuple = (
- other.platformID,
- other.platEncID,
- other.langID,
- other.nameID,
- other.toBytes(),
- )
- return selfTuple < otherTuple
- except (UnicodeEncodeError, AttributeError):
- # This can only happen for
- # 1) an object that is not a NameRecord, or
- # 2) an unlikely incomplete NameRecord object which has not been
- # fully populated, or
- # 3) when all IDs are identical but the strings can't be encoded
- # for their platform encoding.
- # In all cases it is best to return NotImplemented.
- return NotImplemented
-
- def __repr__(self):
- return "<NameRecord NameID=%d; PlatformID=%d; LanguageID=%d>" % (
- self.nameID, self.platformID, self.langID)
+ def getEncoding(self, default="ascii"):
+ """Returns the Python encoding name for this name entry based on its platformID,
+ platEncID, and langID. If encoding for these values is not known, by default
+        'ascii' is returned. That can be overridden by passing a value to the default
+ argument.
+ """
+ return getEncoding(self.platformID, self.platEncID, self.langID, default)
+
+ def encodingIsUnicodeCompatible(self):
+ return self.getEncoding(None) in ["utf_16_be", "ucs2be", "ascii", "latin1"]
+
+ def __str__(self):
+ return self.toStr(errors="backslashreplace")
+
+ def isUnicode(self):
+ return self.platformID == 0 or (
+ self.platformID == 3 and self.platEncID in [0, 1, 10]
+ )
+
+ def toUnicode(self, errors="strict"):
+ """
+ If self.string is a Unicode string, return it; otherwise try decoding the
+ bytes in self.string to a Unicode string using the encoding of this
+ entry as returned by self.getEncoding(); Note that self.getEncoding()
+ returns 'ascii' if the encoding is unknown to the library.
+
+ Certain heuristics are performed to recover data from bytes that are
+ ill-formed in the chosen encoding, or that otherwise look misencoded
+ (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE
+ but marked otherwise). If the bytes are ill-formed and the heuristics fail,
+ the error is handled according to the errors parameter to this function, which is
+ passed to the underlying decode() function; by default it throws a
+ UnicodeDecodeError exception.
+
+ Note: The mentioned heuristics mean that roundtripping a font to XML and back
+ to binary might recover some misencoded data whereas just loading the font
+ and saving it back will not change them.
+ """
+
+ def isascii(b):
+ return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D]
+
+ encoding = self.getEncoding()
+ string = self.string
+
+ if (
+ isinstance(string, bytes)
+ and encoding == "utf_16_be"
+ and len(string) % 2 == 1
+ ):
+ # Recover badly encoded UTF-16 strings that have an odd number of bytes:
+ # - If the last byte is zero, drop it. Otherwise,
+ # - If all the odd bytes are zero and all the even bytes are ASCII,
+ # prepend one zero byte. Otherwise,
+ # - If first byte is zero and all other bytes are ASCII, insert zero
+ # bytes between consecutive ASCII bytes.
+ #
+ # (Yes, I've seen all of these in the wild... sigh)
+ if byteord(string[-1]) == 0:
+ string = string[:-1]
+ elif all(
+ byteord(b) == 0 if i % 2 else isascii(byteord(b))
+ for i, b in enumerate(string)
+ ):
+ string = b"\0" + string
+ elif byteord(string[0]) == 0 and all(
+ isascii(byteord(b)) for b in string[1:]
+ ):
+ string = bytesjoin(b"\0" + bytechr(byteord(b)) for b in string[1:])
+
+ string = tostr(string, encoding=encoding, errors=errors)
+
+        # If the decoded string still looks like UTF-16BE, it suggests a double-encoding.
+ # Fix it up.
+ if all(
+ ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i, c in enumerate(string)
+ ):
+ # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text,
+ # narrow it down.
+ string = "".join(c for c in string[1::2])
+
+ return string
+
+ def toBytes(self, errors="strict"):
+ """If self.string is a bytes object, return it; otherwise try encoding
+ the Unicode string in self.string to bytes using the encoding of this
+ entry as returned by self.getEncoding(); Note that self.getEncoding()
+ returns 'ascii' if the encoding is unknown to the library.
+
+ If the Unicode string cannot be encoded to bytes in the chosen encoding,
+ the error is handled according to the errors parameter to this function,
+ which is passed to the underlying encode() function; by default it throws a
+ UnicodeEncodeError exception.
+ """
+ return tobytes(self.string, encoding=self.getEncoding(), errors=errors)
+
+ toStr = toUnicode
+
+ def toXML(self, writer, ttFont):
+ try:
+ unistr = self.toUnicode()
+ except UnicodeDecodeError:
+ unistr = None
+ attrs = [
+ ("nameID", self.nameID),
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ("langID", hex(self.langID)),
+ ]
+
+ if unistr is None or not self.encodingIsUnicodeCompatible():
+ attrs.append(("unicode", unistr is not None))
+
+ writer.begintag("namerecord", attrs)
+ writer.newline()
+ if unistr is not None:
+ writer.write(unistr)
+ else:
+ writer.write8bit(self.string)
+ writer.newline()
+ writer.endtag("namerecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.nameID = safeEval(attrs["nameID"])
+ self.platformID = safeEval(attrs["platformID"])
+ self.platEncID = safeEval(attrs["platEncID"])
+ self.langID = safeEval(attrs["langID"])
+ s = strjoin(content).strip()
+ encoding = self.getEncoding()
+ if self.encodingIsUnicodeCompatible() or safeEval(
+ attrs.get("unicode", "False")
+ ):
+ self.string = s.encode(encoding)
+ else:
+ # This is the inverse of write8bit...
+ self.string = s.encode("latin1")
+
+ def __lt__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+
+ try:
+ selfTuple = (
+ self.platformID,
+ self.platEncID,
+ self.langID,
+ self.nameID,
+ )
+ otherTuple = (
+ other.platformID,
+ other.platEncID,
+ other.langID,
+ other.nameID,
+ )
+ except AttributeError:
+ # This can only happen for
+ # 1) an object that is not a NameRecord, or
+ # 2) an unlikely incomplete NameRecord object which has not been
+ # fully populated
+ return NotImplemented
+
+ try:
+ # Include the actual NameRecord string in the comparison tuples
+ selfTuple = selfTuple + (self.toBytes(),)
+ otherTuple = otherTuple + (other.toBytes(),)
+ except UnicodeEncodeError as e:
+            # toBytes caused an encoding error in either of the two, so fall back
+            # to sorting based on IDs only
+ log.error("NameRecord sorting failed to encode: %s" % e)
+
+ # Implemented so that list.sort() sorts according to the spec by using
+ # the order of the tuple items and their comparison
+ return selfTuple < otherTuple
+
+ def __repr__(self):
+ return "<NameRecord NameID=%d; PlatformID=%d; LanguageID=%d>" % (
+ self.nameID,
+ self.platformID,
+ self.langID,
+ )
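A minimal sketch of the odd-byte-count UTF-16BE recovery described in toUnicode(), using the module-level makeName() referenced above; the byte string is a deliberately mangled "ABC" whose leading zero byte was dropped:

    from fontTools.ttLib.tables._n_a_m_e import makeName

    # Windows (platformID 3, platEncID 1) implies UTF-16BE. The odd-length
    # payload triggers the heuristic that prepends a zero byte before decoding.
    bad = makeName(b"A\x00B\x00C", 1, 3, 1, 0x409)
    assert bad.getEncoding() == "utf_16_be"
    assert bad.toUnicode() == "ABC"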
# Windows language ID → IETF BCP-47 language tag
@@ -604,183 +686,182 @@ class NameRecord(object):
# http://www.unicode.org/cldr/charts/latest/supplemental/likely_subtags.html
# http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry
_WINDOWS_LANGUAGES = {
- 0x0436: 'af',
- 0x041C: 'sq',
- 0x0484: 'gsw',
- 0x045E: 'am',
- 0x1401: 'ar-DZ',
- 0x3C01: 'ar-BH',
- 0x0C01: 'ar',
- 0x0801: 'ar-IQ',
- 0x2C01: 'ar-JO',
- 0x3401: 'ar-KW',
- 0x3001: 'ar-LB',
- 0x1001: 'ar-LY',
- 0x1801: 'ary',
- 0x2001: 'ar-OM',
- 0x4001: 'ar-QA',
- 0x0401: 'ar-SA',
- 0x2801: 'ar-SY',
- 0x1C01: 'aeb',
- 0x3801: 'ar-AE',
- 0x2401: 'ar-YE',
- 0x042B: 'hy',
- 0x044D: 'as',
- 0x082C: 'az-Cyrl',
- 0x042C: 'az',
- 0x046D: 'ba',
- 0x042D: 'eu',
- 0x0423: 'be',
- 0x0845: 'bn',
- 0x0445: 'bn-IN',
- 0x201A: 'bs-Cyrl',
- 0x141A: 'bs',
- 0x047E: 'br',
- 0x0402: 'bg',
- 0x0403: 'ca',
- 0x0C04: 'zh-HK',
- 0x1404: 'zh-MO',
- 0x0804: 'zh',
- 0x1004: 'zh-SG',
- 0x0404: 'zh-TW',
- 0x0483: 'co',
- 0x041A: 'hr',
- 0x101A: 'hr-BA',
- 0x0405: 'cs',
- 0x0406: 'da',
- 0x048C: 'prs',
- 0x0465: 'dv',
- 0x0813: 'nl-BE',
- 0x0413: 'nl',
- 0x0C09: 'en-AU',
- 0x2809: 'en-BZ',
- 0x1009: 'en-CA',
- 0x2409: 'en-029',
- 0x4009: 'en-IN',
- 0x1809: 'en-IE',
- 0x2009: 'en-JM',
- 0x4409: 'en-MY',
- 0x1409: 'en-NZ',
- 0x3409: 'en-PH',
- 0x4809: 'en-SG',
- 0x1C09: 'en-ZA',
- 0x2C09: 'en-TT',
- 0x0809: 'en-GB',
- 0x0409: 'en',
- 0x3009: 'en-ZW',
- 0x0425: 'et',
- 0x0438: 'fo',
- 0x0464: 'fil',
- 0x040B: 'fi',
- 0x080C: 'fr-BE',
- 0x0C0C: 'fr-CA',
- 0x040C: 'fr',
- 0x140C: 'fr-LU',
- 0x180C: 'fr-MC',
- 0x100C: 'fr-CH',
- 0x0462: 'fy',
- 0x0456: 'gl',
- 0x0437: 'ka',
- 0x0C07: 'de-AT',
- 0x0407: 'de',
- 0x1407: 'de-LI',
- 0x1007: 'de-LU',
- 0x0807: 'de-CH',
- 0x0408: 'el',
- 0x046F: 'kl',
- 0x0447: 'gu',
- 0x0468: 'ha',
- 0x040D: 'he',
- 0x0439: 'hi',
- 0x040E: 'hu',
- 0x040F: 'is',
- 0x0470: 'ig',
- 0x0421: 'id',
- 0x045D: 'iu',
- 0x085D: 'iu-Latn',
- 0x083C: 'ga',
- 0x0434: 'xh',
- 0x0435: 'zu',
- 0x0410: 'it',
- 0x0810: 'it-CH',
- 0x0411: 'ja',
- 0x044B: 'kn',
- 0x043F: 'kk',
- 0x0453: 'km',
- 0x0486: 'quc',
- 0x0487: 'rw',
- 0x0441: 'sw',
- 0x0457: 'kok',
- 0x0412: 'ko',
- 0x0440: 'ky',
- 0x0454: 'lo',
- 0x0426: 'lv',
- 0x0427: 'lt',
- 0x082E: 'dsb',
- 0x046E: 'lb',
- 0x042F: 'mk',
- 0x083E: 'ms-BN',
- 0x043E: 'ms',
- 0x044C: 'ml',
- 0x043A: 'mt',
- 0x0481: 'mi',
- 0x047A: 'arn',
- 0x044E: 'mr',
- 0x047C: 'moh',
- 0x0450: 'mn',
- 0x0850: 'mn-CN',
- 0x0461: 'ne',
- 0x0414: 'nb',
- 0x0814: 'nn',
- 0x0482: 'oc',
- 0x0448: 'or',
- 0x0463: 'ps',
- 0x0415: 'pl',
- 0x0416: 'pt',
- 0x0816: 'pt-PT',
- 0x0446: 'pa',
- 0x046B: 'qu-BO',
- 0x086B: 'qu-EC',
- 0x0C6B: 'qu',
- 0x0418: 'ro',
- 0x0417: 'rm',
- 0x0419: 'ru',
- 0x243B: 'smn',
- 0x103B: 'smj-NO',
- 0x143B: 'smj',
- 0x0C3B: 'se-FI',
- 0x043B: 'se',
- 0x083B: 'se-SE',
- 0x203B: 'sms',
- 0x183B: 'sma-NO',
- 0x1C3B: 'sms',
- 0x044F: 'sa',
- 0x1C1A: 'sr-Cyrl-BA',
- 0x0C1A: 'sr',
- 0x181A: 'sr-Latn-BA',
- 0x081A: 'sr-Latn',
- 0x046C: 'nso',
- 0x0432: 'tn',
- 0x045B: 'si',
- 0x041B: 'sk',
- 0x0424: 'sl',
- 0x2C0A: 'es-AR',
- 0x400A: 'es-BO',
- 0x340A: 'es-CL',
- 0x240A: 'es-CO',
- 0x140A: 'es-CR',
- 0x1C0A: 'es-DO',
- 0x300A: 'es-EC',
- 0x440A: 'es-SV',
- 0x100A: 'es-GT',
- 0x480A: 'es-HN',
- 0x080A: 'es-MX',
- 0x4C0A: 'es-NI',
- 0x180A: 'es-PA',
- 0x3C0A: 'es-PY',
- 0x280A: 'es-PE',
- 0x500A: 'es-PR',
-
+ 0x0436: "af",
+ 0x041C: "sq",
+ 0x0484: "gsw",
+ 0x045E: "am",
+ 0x1401: "ar-DZ",
+ 0x3C01: "ar-BH",
+ 0x0C01: "ar",
+ 0x0801: "ar-IQ",
+ 0x2C01: "ar-JO",
+ 0x3401: "ar-KW",
+ 0x3001: "ar-LB",
+ 0x1001: "ar-LY",
+ 0x1801: "ary",
+ 0x2001: "ar-OM",
+ 0x4001: "ar-QA",
+ 0x0401: "ar-SA",
+ 0x2801: "ar-SY",
+ 0x1C01: "aeb",
+ 0x3801: "ar-AE",
+ 0x2401: "ar-YE",
+ 0x042B: "hy",
+ 0x044D: "as",
+ 0x082C: "az-Cyrl",
+ 0x042C: "az",
+ 0x046D: "ba",
+ 0x042D: "eu",
+ 0x0423: "be",
+ 0x0845: "bn",
+ 0x0445: "bn-IN",
+ 0x201A: "bs-Cyrl",
+ 0x141A: "bs",
+ 0x047E: "br",
+ 0x0402: "bg",
+ 0x0403: "ca",
+ 0x0C04: "zh-HK",
+ 0x1404: "zh-MO",
+ 0x0804: "zh",
+ 0x1004: "zh-SG",
+ 0x0404: "zh-TW",
+ 0x0483: "co",
+ 0x041A: "hr",
+ 0x101A: "hr-BA",
+ 0x0405: "cs",
+ 0x0406: "da",
+ 0x048C: "prs",
+ 0x0465: "dv",
+ 0x0813: "nl-BE",
+ 0x0413: "nl",
+ 0x0C09: "en-AU",
+ 0x2809: "en-BZ",
+ 0x1009: "en-CA",
+ 0x2409: "en-029",
+ 0x4009: "en-IN",
+ 0x1809: "en-IE",
+ 0x2009: "en-JM",
+ 0x4409: "en-MY",
+ 0x1409: "en-NZ",
+ 0x3409: "en-PH",
+ 0x4809: "en-SG",
+ 0x1C09: "en-ZA",
+ 0x2C09: "en-TT",
+ 0x0809: "en-GB",
+ 0x0409: "en",
+ 0x3009: "en-ZW",
+ 0x0425: "et",
+ 0x0438: "fo",
+ 0x0464: "fil",
+ 0x040B: "fi",
+ 0x080C: "fr-BE",
+ 0x0C0C: "fr-CA",
+ 0x040C: "fr",
+ 0x140C: "fr-LU",
+ 0x180C: "fr-MC",
+ 0x100C: "fr-CH",
+ 0x0462: "fy",
+ 0x0456: "gl",
+ 0x0437: "ka",
+ 0x0C07: "de-AT",
+ 0x0407: "de",
+ 0x1407: "de-LI",
+ 0x1007: "de-LU",
+ 0x0807: "de-CH",
+ 0x0408: "el",
+ 0x046F: "kl",
+ 0x0447: "gu",
+ 0x0468: "ha",
+ 0x040D: "he",
+ 0x0439: "hi",
+ 0x040E: "hu",
+ 0x040F: "is",
+ 0x0470: "ig",
+ 0x0421: "id",
+ 0x045D: "iu",
+ 0x085D: "iu-Latn",
+ 0x083C: "ga",
+ 0x0434: "xh",
+ 0x0435: "zu",
+ 0x0410: "it",
+ 0x0810: "it-CH",
+ 0x0411: "ja",
+ 0x044B: "kn",
+ 0x043F: "kk",
+ 0x0453: "km",
+ 0x0486: "quc",
+ 0x0487: "rw",
+ 0x0441: "sw",
+ 0x0457: "kok",
+ 0x0412: "ko",
+ 0x0440: "ky",
+ 0x0454: "lo",
+ 0x0426: "lv",
+ 0x0427: "lt",
+ 0x082E: "dsb",
+ 0x046E: "lb",
+ 0x042F: "mk",
+ 0x083E: "ms-BN",
+ 0x043E: "ms",
+ 0x044C: "ml",
+ 0x043A: "mt",
+ 0x0481: "mi",
+ 0x047A: "arn",
+ 0x044E: "mr",
+ 0x047C: "moh",
+ 0x0450: "mn",
+ 0x0850: "mn-CN",
+ 0x0461: "ne",
+ 0x0414: "nb",
+ 0x0814: "nn",
+ 0x0482: "oc",
+ 0x0448: "or",
+ 0x0463: "ps",
+ 0x0415: "pl",
+ 0x0416: "pt",
+ 0x0816: "pt-PT",
+ 0x0446: "pa",
+ 0x046B: "qu-BO",
+ 0x086B: "qu-EC",
+ 0x0C6B: "qu",
+ 0x0418: "ro",
+ 0x0417: "rm",
+ 0x0419: "ru",
+ 0x243B: "smn",
+ 0x103B: "smj-NO",
+ 0x143B: "smj",
+ 0x0C3B: "se-FI",
+ 0x043B: "se",
+ 0x083B: "se-SE",
+ 0x203B: "sms",
+ 0x183B: "sma-NO",
+ 0x1C3B: "sms",
+ 0x044F: "sa",
+ 0x1C1A: "sr-Cyrl-BA",
+ 0x0C1A: "sr",
+ 0x181A: "sr-Latn-BA",
+ 0x081A: "sr-Latn",
+ 0x046C: "nso",
+ 0x0432: "tn",
+ 0x045B: "si",
+ 0x041B: "sk",
+ 0x0424: "sl",
+ 0x2C0A: "es-AR",
+ 0x400A: "es-BO",
+ 0x340A: "es-CL",
+ 0x240A: "es-CO",
+ 0x140A: "es-CR",
+ 0x1C0A: "es-DO",
+ 0x300A: "es-EC",
+ 0x440A: "es-SV",
+ 0x100A: "es-GT",
+ 0x480A: "es-HN",
+ 0x080A: "es-MX",
+ 0x4C0A: "es-NI",
+ 0x180A: "es-PA",
+ 0x3C0A: "es-PY",
+ 0x280A: "es-PE",
+ 0x500A: "es-PR",
# Microsoft has defined two different language codes for
# “Spanish with modern sorting” and “Spanish with traditional
# sorting”. This makes sense for collation APIs, and it would be
@@ -788,163 +869,164 @@ _WINDOWS_LANGUAGES = {
# extensions (eg., “es-u-co-trad” is “Spanish with traditional
# sorting”). However, for storing names in fonts, this distinction
# does not make sense, so we use “es” in both cases.
- 0x0C0A: 'es',
- 0x040A: 'es',
-
- 0x540A: 'es-US',
- 0x380A: 'es-UY',
- 0x200A: 'es-VE',
- 0x081D: 'sv-FI',
- 0x041D: 'sv',
- 0x045A: 'syr',
- 0x0428: 'tg',
- 0x085F: 'tzm',
- 0x0449: 'ta',
- 0x0444: 'tt',
- 0x044A: 'te',
- 0x041E: 'th',
- 0x0451: 'bo',
- 0x041F: 'tr',
- 0x0442: 'tk',
- 0x0480: 'ug',
- 0x0422: 'uk',
- 0x042E: 'hsb',
- 0x0420: 'ur',
- 0x0843: 'uz-Cyrl',
- 0x0443: 'uz',
- 0x042A: 'vi',
- 0x0452: 'cy',
- 0x0488: 'wo',
- 0x0485: 'sah',
- 0x0478: 'ii',
- 0x046A: 'yo',
+ 0x0C0A: "es",
+ 0x040A: "es",
+ 0x540A: "es-US",
+ 0x380A: "es-UY",
+ 0x200A: "es-VE",
+ 0x081D: "sv-FI",
+ 0x041D: "sv",
+ 0x045A: "syr",
+ 0x0428: "tg",
+ 0x085F: "tzm",
+ 0x0449: "ta",
+ 0x0444: "tt",
+ 0x044A: "te",
+ 0x041E: "th",
+ 0x0451: "bo",
+ 0x041F: "tr",
+ 0x0442: "tk",
+ 0x0480: "ug",
+ 0x0422: "uk",
+ 0x042E: "hsb",
+ 0x0420: "ur",
+ 0x0843: "uz-Cyrl",
+ 0x0443: "uz",
+ 0x042A: "vi",
+ 0x0452: "cy",
+ 0x0488: "wo",
+ 0x0485: "sah",
+ 0x0478: "ii",
+ 0x046A: "yo",
}
_MAC_LANGUAGES = {
- 0: 'en',
- 1: 'fr',
- 2: 'de',
- 3: 'it',
- 4: 'nl',
- 5: 'sv',
- 6: 'es',
- 7: 'da',
- 8: 'pt',
- 9: 'no',
- 10: 'he',
- 11: 'ja',
- 12: 'ar',
- 13: 'fi',
- 14: 'el',
- 15: 'is',
- 16: 'mt',
- 17: 'tr',
- 18: 'hr',
- 19: 'zh-Hant',
- 20: 'ur',
- 21: 'hi',
- 22: 'th',
- 23: 'ko',
- 24: 'lt',
- 25: 'pl',
- 26: 'hu',
- 27: 'es',
- 28: 'lv',
- 29: 'se',
- 30: 'fo',
- 31: 'fa',
- 32: 'ru',
- 33: 'zh',
- 34: 'nl-BE',
- 35: 'ga',
- 36: 'sq',
- 37: 'ro',
- 38: 'cz',
- 39: 'sk',
- 40: 'sl',
- 41: 'yi',
- 42: 'sr',
- 43: 'mk',
- 44: 'bg',
- 45: 'uk',
- 46: 'be',
- 47: 'uz',
- 48: 'kk',
- 49: 'az-Cyrl',
- 50: 'az-Arab',
- 51: 'hy',
- 52: 'ka',
- 53: 'mo',
- 54: 'ky',
- 55: 'tg',
- 56: 'tk',
- 57: 'mn-CN',
- 58: 'mn',
- 59: 'ps',
- 60: 'ks',
- 61: 'ku',
- 62: 'sd',
- 63: 'bo',
- 64: 'ne',
- 65: 'sa',
- 66: 'mr',
- 67: 'bn',
- 68: 'as',
- 69: 'gu',
- 70: 'pa',
- 71: 'or',
- 72: 'ml',
- 73: 'kn',
- 74: 'ta',
- 75: 'te',
- 76: 'si',
- 77: 'my',
- 78: 'km',
- 79: 'lo',
- 80: 'vi',
- 81: 'id',
- 82: 'tl',
- 83: 'ms',
- 84: 'ms-Arab',
- 85: 'am',
- 86: 'ti',
- 87: 'om',
- 88: 'so',
- 89: 'sw',
- 90: 'rw',
- 91: 'rn',
- 92: 'ny',
- 93: 'mg',
- 94: 'eo',
- 128: 'cy',
- 129: 'eu',
- 130: 'ca',
- 131: 'la',
- 132: 'qu',
- 133: 'gn',
- 134: 'ay',
- 135: 'tt',
- 136: 'ug',
- 137: 'dz',
- 138: 'jv',
- 139: 'su',
- 140: 'gl',
- 141: 'af',
- 142: 'br',
- 143: 'iu',
- 144: 'gd',
- 145: 'gv',
- 146: 'ga',
- 147: 'to',
- 148: 'el-polyton',
- 149: 'kl',
- 150: 'az',
- 151: 'nn',
+ 0: "en",
+ 1: "fr",
+ 2: "de",
+ 3: "it",
+ 4: "nl",
+ 5: "sv",
+ 6: "es",
+ 7: "da",
+ 8: "pt",
+ 9: "no",
+ 10: "he",
+ 11: "ja",
+ 12: "ar",
+ 13: "fi",
+ 14: "el",
+ 15: "is",
+ 16: "mt",
+ 17: "tr",
+ 18: "hr",
+ 19: "zh-Hant",
+ 20: "ur",
+ 21: "hi",
+ 22: "th",
+ 23: "ko",
+ 24: "lt",
+ 25: "pl",
+ 26: "hu",
+ 27: "es",
+ 28: "lv",
+ 29: "se",
+ 30: "fo",
+ 31: "fa",
+ 32: "ru",
+ 33: "zh",
+ 34: "nl-BE",
+ 35: "ga",
+ 36: "sq",
+ 37: "ro",
+ 38: "cz",
+ 39: "sk",
+ 40: "sl",
+ 41: "yi",
+ 42: "sr",
+ 43: "mk",
+ 44: "bg",
+ 45: "uk",
+ 46: "be",
+ 47: "uz",
+ 48: "kk",
+ 49: "az-Cyrl",
+ 50: "az-Arab",
+ 51: "hy",
+ 52: "ka",
+ 53: "mo",
+ 54: "ky",
+ 55: "tg",
+ 56: "tk",
+ 57: "mn-CN",
+ 58: "mn",
+ 59: "ps",
+ 60: "ks",
+ 61: "ku",
+ 62: "sd",
+ 63: "bo",
+ 64: "ne",
+ 65: "sa",
+ 66: "mr",
+ 67: "bn",
+ 68: "as",
+ 69: "gu",
+ 70: "pa",
+ 71: "or",
+ 72: "ml",
+ 73: "kn",
+ 74: "ta",
+ 75: "te",
+ 76: "si",
+ 77: "my",
+ 78: "km",
+ 79: "lo",
+ 80: "vi",
+ 81: "id",
+ 82: "tl",
+ 83: "ms",
+ 84: "ms-Arab",
+ 85: "am",
+ 86: "ti",
+ 87: "om",
+ 88: "so",
+ 89: "sw",
+ 90: "rw",
+ 91: "rn",
+ 92: "ny",
+ 93: "mg",
+ 94: "eo",
+ 128: "cy",
+ 129: "eu",
+ 130: "ca",
+ 131: "la",
+ 132: "qu",
+ 133: "gn",
+ 134: "ay",
+ 135: "tt",
+ 136: "ug",
+ 137: "dz",
+ 138: "jv",
+ 139: "su",
+ 140: "gl",
+ 141: "af",
+ 142: "br",
+ 143: "iu",
+ 144: "gd",
+ 145: "gv",
+ 146: "ga",
+ 147: "to",
+ 148: "el-polyton",
+ 149: "kl",
+ 150: "az",
+ 151: "nn",
}
-_WINDOWS_LANGUAGE_CODES = {lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items()}
+_WINDOWS_LANGUAGE_CODES = {
+ lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items()
+}
_MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()}
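Both reverse maps key on lowercased tags, so callers are expected to lowercase before lookup; for example (values taken from the tables above):

    assert _WINDOWS_LANGUAGE_CODES["de-ch"] == 0x0807
    assert _WINDOWS_LANGUAGE_CODES["sr-latn-ba"] == 0x181A
    assert _MAC_LANGUAGE_CODES["zh-hant"] == 19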
@@ -1079,5 +1161,68 @@ _MAC_LANGUAGE_TO_SCRIPT = {
148: 6, # langGreekAncient → smRoman
149: 0, # langGreenlandic → smRoman
150: 0, # langAzerbaijanRoman → smRoman
- 151: 0, # langNynorsk → smRoman
+ 151: 0, # langNynorsk → smRoman
}
+
+
+class NameRecordVisitor(TTVisitor):
+ # Font tables that have NameIDs we need to collect.
+ TABLES = ("GSUB", "GPOS", "fvar", "CPAL", "STAT")
+
+ def __init__(self):
+ self.seen = set()
+
+
+@NameRecordVisitor.register_attrs(
+ (
+ (otTables.FeatureParamsSize, ("SubfamilyID", "SubfamilyNameID")),
+ (otTables.FeatureParamsStylisticSet, ("UINameID",)),
+ (
+ otTables.FeatureParamsCharacterVariants,
+ (
+ "FeatUILabelNameID",
+ "FeatUITooltipTextNameID",
+ "SampleTextNameID",
+ "FirstParamUILabelNameID",
+ ),
+ ),
+ (otTables.STAT, ("ElidedFallbackNameID",)),
+ (otTables.AxisRecord, ("AxisNameID",)),
+ (otTables.AxisValue, ("ValueNameID",)),
+ (otTables.FeatureName, ("FeatureNameID",)),
+ (otTables.Setting, ("SettingNameID",)),
+ )
+)
+def visit(visitor, obj, attr, value):
+ visitor.seen.add(value)
+
+
+@NameRecordVisitor.register(ttLib.getTableClass("fvar"))
+def visit(visitor, obj):
+ for inst in obj.instances:
+ if inst.postscriptNameID != 0xFFFF:
+ visitor.seen.add(inst.postscriptNameID)
+ visitor.seen.add(inst.subfamilyNameID)
+
+ for axis in obj.axes:
+ visitor.seen.add(axis.axisNameID)
+
+
+@NameRecordVisitor.register(ttLib.getTableClass("CPAL"))
+def visit(visitor, obj):
+ if obj.version == 1:
+ visitor.seen.update(obj.paletteLabels)
+ visitor.seen.update(obj.paletteEntryLabels)
+
+
+@NameRecordVisitor.register(ttLib.TTFont)
+def visit(visitor, font, *args, **kwargs):
+ if hasattr(visitor, "font"):
+ return False
+
+ visitor.font = font
+ for tag in visitor.TABLES:
+ if tag in font:
+ visitor.visit(font[tag], *args, **kwargs)
+ del visitor.font
+ return False
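A rough usage sketch for the visitor above, assuming the usual TTVisitor entry point of calling visit() directly on a TTFont (the font path is hypothetical):

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")  # hypothetical path
    visitor = NameRecordVisitor()
    visitor.visit(font)
    # Name IDs referenced from the tables listed in NameRecordVisitor.TABLES
    print(sorted(visitor.seen))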
diff --git a/Lib/fontTools/ttLib/tables/_p_o_s_t.py b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
index c54b87f0..dba63711 100644
--- a/Lib/fontTools/ttLib/tables/_p_o_s_t.py
+++ b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
@@ -27,266 +27,282 @@ postFormatSize = sstruct.calcsize(postFormat)
class table__p_o_s_t(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ sstruct.unpack(postFormat, data[:postFormatSize], self)
+ data = data[postFormatSize:]
+ if self.formatType == 1.0:
+ self.decode_format_1_0(data, ttFont)
+ elif self.formatType == 2.0:
+ self.decode_format_2_0(data, ttFont)
+ elif self.formatType == 3.0:
+ self.decode_format_3_0(data, ttFont)
+ elif self.formatType == 4.0:
+ self.decode_format_4_0(data, ttFont)
+ else:
+            # unsupported format
+ raise ttLib.TTLibError(
+ "'post' table format %f not supported" % self.formatType
+ )
- def decompile(self, data, ttFont):
- sstruct.unpack(postFormat, data[:postFormatSize], self)
- data = data[postFormatSize:]
- if self.formatType == 1.0:
- self.decode_format_1_0(data, ttFont)
- elif self.formatType == 2.0:
- self.decode_format_2_0(data, ttFont)
- elif self.formatType == 3.0:
- self.decode_format_3_0(data, ttFont)
- elif self.formatType == 4.0:
- self.decode_format_4_0(data, ttFont)
- else:
- # supported format
- raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
+ def compile(self, ttFont):
+ data = sstruct.pack(postFormat, self)
+ if self.formatType == 1.0:
+ pass # we're done
+ elif self.formatType == 2.0:
+ data = data + self.encode_format_2_0(ttFont)
+ elif self.formatType == 3.0:
+ pass # we're done
+ elif self.formatType == 4.0:
+ data = data + self.encode_format_4_0(ttFont)
+ else:
+            # unsupported format
+ raise ttLib.TTLibError(
+ "'post' table format %f not supported" % self.formatType
+ )
+ return data
- def compile(self, ttFont):
- data = sstruct.pack(postFormat, self)
- if self.formatType == 1.0:
- pass # we're done
- elif self.formatType == 2.0:
- data = data + self.encode_format_2_0(ttFont)
- elif self.formatType == 3.0:
- pass # we're done
- elif self.formatType == 4.0:
- data = data + self.encode_format_4_0(ttFont)
- else:
- # supported format
- raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
- return data
+ def getGlyphOrder(self):
+ """This function will get called by a ttLib.TTFont instance.
+ Do not call this function yourself, use TTFont().getGlyphOrder()
+ or its relatives instead!
+ """
+ if not hasattr(self, "glyphOrder"):
+ raise ttLib.TTLibError("illegal use of getGlyphOrder()")
+ glyphOrder = self.glyphOrder
+ del self.glyphOrder
+ return glyphOrder
- def getGlyphOrder(self):
- """This function will get called by a ttLib.TTFont instance.
- Do not call this function yourself, use TTFont().getGlyphOrder()
- or its relatives instead!
- """
- if not hasattr(self, "glyphOrder"):
- raise ttLib.TTLibError("illegal use of getGlyphOrder()")
- glyphOrder = self.glyphOrder
- del self.glyphOrder
- return glyphOrder
+ def decode_format_1_0(self, data, ttFont):
+ self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs]
- def decode_format_1_0(self, data, ttFont):
- self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs]
+ def decode_format_2_0(self, data, ttFont):
+ (numGlyphs,) = struct.unpack(">H", data[:2])
+ numGlyphs = int(numGlyphs)
+ if numGlyphs > ttFont["maxp"].numGlyphs:
+ # Assume the numGlyphs field is bogus, so sync with maxp.
+ # I've seen this in one font, and if the assumption is
+ # wrong elsewhere, well, so be it: it's hard enough to
+ # work around _one_ non-conforming post format...
+ numGlyphs = ttFont["maxp"].numGlyphs
+ data = data[2:]
+ indices = array.array("H")
+ indices.frombytes(data[: 2 * numGlyphs])
+ if sys.byteorder != "big":
+ indices.byteswap()
+ data = data[2 * numGlyphs :]
+ maxIndex = max(indices)
+ self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257)
+ self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs)
+ for glyphID in range(numGlyphs):
+ index = indices[glyphID]
+ if index > 257:
+ try:
+ name = extraNames[index - 258]
+ except IndexError:
+ name = ""
+ else:
+ # fetch names from standard list
+ name = standardGlyphOrder[index]
+ glyphOrder[glyphID] = name
+ self.build_psNameMapping(ttFont)
- def decode_format_2_0(self, data, ttFont):
- numGlyphs, = struct.unpack(">H", data[:2])
- numGlyphs = int(numGlyphs)
- if numGlyphs > ttFont['maxp'].numGlyphs:
- # Assume the numGlyphs field is bogus, so sync with maxp.
- # I've seen this in one font, and if the assumption is
- # wrong elsewhere, well, so be it: it's hard enough to
- # work around _one_ non-conforming post format...
- numGlyphs = ttFont['maxp'].numGlyphs
- data = data[2:]
- indices = array.array("H")
- indices.frombytes(data[:2*numGlyphs])
- if sys.byteorder != "big": indices.byteswap()
- data = data[2*numGlyphs:]
- maxIndex = max(indices)
- self.extraNames = extraNames = unpackPStrings(data, maxIndex-257)
- self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs)
- for glyphID in range(numGlyphs):
- index = indices[glyphID]
- if index > 257:
- try:
- name = extraNames[index-258]
- except IndexError:
- name = ""
- else:
- # fetch names from standard list
- name = standardGlyphOrder[index]
- glyphOrder[glyphID] = name
- self.build_psNameMapping(ttFont)
+ def build_psNameMapping(self, ttFont):
+ mapping = {}
+ allNames = {}
+ for i in range(ttFont["maxp"].numGlyphs):
+ glyphName = psName = self.glyphOrder[i]
+ if glyphName == "":
+ glyphName = "glyph%.5d" % i
+ if glyphName in allNames:
+ # make up a new glyphName that's unique
+ n = allNames[glyphName]
+ while (glyphName + "#" + str(n)) in allNames:
+ n += 1
+ allNames[glyphName] = n + 1
+ glyphName = glyphName + "#" + str(n)
- def build_psNameMapping(self, ttFont):
- mapping = {}
- allNames = {}
- for i in range(ttFont['maxp'].numGlyphs):
- glyphName = psName = self.glyphOrder[i]
- if glyphName == "":
- glyphName = "glyph%.5d" % i
- if glyphName in allNames:
- # make up a new glyphName that's unique
- n = allNames[glyphName]
- while (glyphName + "#" + str(n)) in allNames:
- n += 1
- allNames[glyphName] = n + 1
- glyphName = glyphName + "#" + str(n)
+ self.glyphOrder[i] = glyphName
+ allNames[glyphName] = 1
+ if glyphName != psName:
+ mapping[glyphName] = psName
- self.glyphOrder[i] = glyphName
- allNames[glyphName] = 1
- if glyphName != psName:
- mapping[glyphName] = psName
+ self.mapping = mapping
- self.mapping = mapping
+ def decode_format_3_0(self, data, ttFont):
+ # Setting self.glyphOrder to None will cause the TTFont object
+        # to try and construct glyph names from a Unicode cmap table.
+ self.glyphOrder = None
- def decode_format_3_0(self, data, ttFont):
- # Setting self.glyphOrder to None will cause the TTFont object
- # try and construct glyph names from a Unicode cmap table.
- self.glyphOrder = None
+ def decode_format_4_0(self, data, ttFont):
+ from fontTools import agl
- def decode_format_4_0(self, data, ttFont):
- from fontTools import agl
- numGlyphs = ttFont['maxp'].numGlyphs
- indices = array.array("H")
- indices.frombytes(data)
- if sys.byteorder != "big": indices.byteswap()
- # In some older fonts, the size of the post table doesn't match
- # the number of glyphs. Sometimes it's bigger, sometimes smaller.
- self.glyphOrder = glyphOrder = [''] * int(numGlyphs)
- for i in range(min(len(indices),numGlyphs)):
- if indices[i] == 0xFFFF:
- self.glyphOrder[i] = ''
- elif indices[i] in agl.UV2AGL:
- self.glyphOrder[i] = agl.UV2AGL[indices[i]]
- else:
- self.glyphOrder[i] = "uni%04X" % indices[i]
- self.build_psNameMapping(ttFont)
+ numGlyphs = ttFont["maxp"].numGlyphs
+ indices = array.array("H")
+ indices.frombytes(data)
+ if sys.byteorder != "big":
+ indices.byteswap()
+ # In some older fonts, the size of the post table doesn't match
+ # the number of glyphs. Sometimes it's bigger, sometimes smaller.
+ self.glyphOrder = glyphOrder = [""] * int(numGlyphs)
+ for i in range(min(len(indices), numGlyphs)):
+ if indices[i] == 0xFFFF:
+ self.glyphOrder[i] = ""
+ elif indices[i] in agl.UV2AGL:
+ self.glyphOrder[i] = agl.UV2AGL[indices[i]]
+ else:
+ self.glyphOrder[i] = "uni%04X" % indices[i]
+ self.build_psNameMapping(ttFont)
- def encode_format_2_0(self, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- assert len(glyphOrder) == numGlyphs
- indices = array.array("H")
- extraDict = {}
- extraNames = self.extraNames = [
- n for n in self.extraNames if n not in standardGlyphOrder]
- for i in range(len(extraNames)):
- extraDict[extraNames[i]] = i
- for glyphID in range(numGlyphs):
- glyphName = glyphOrder[glyphID]
- if glyphName in self.mapping:
- psName = self.mapping[glyphName]
- else:
- psName = glyphName
- if psName in extraDict:
- index = 258 + extraDict[psName]
- elif psName in standardGlyphOrder:
- index = standardGlyphOrder.index(psName)
- else:
- index = 258 + len(extraNames)
- extraDict[psName] = len(extraNames)
- extraNames.append(psName)
- indices.append(index)
- if sys.byteorder != "big": indices.byteswap()
- return struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
+ def encode_format_2_0(self, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ assert len(glyphOrder) == numGlyphs
+ indices = array.array("H")
+ extraDict = {}
+ extraNames = self.extraNames = [
+ n for n in self.extraNames if n not in standardGlyphOrder
+ ]
+ for i in range(len(extraNames)):
+ extraDict[extraNames[i]] = i
+ for glyphID in range(numGlyphs):
+ glyphName = glyphOrder[glyphID]
+ if glyphName in self.mapping:
+ psName = self.mapping[glyphName]
+ else:
+ psName = glyphName
+ if psName in extraDict:
+ index = 258 + extraDict[psName]
+ elif psName in standardGlyphOrder:
+ index = standardGlyphOrder.index(psName)
+ else:
+ index = 258 + len(extraNames)
+ extraDict[psName] = len(extraNames)
+ extraNames.append(psName)
+ indices.append(index)
+ if sys.byteorder != "big":
+ indices.byteswap()
+ return (
+ struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
+ )
- def encode_format_4_0(self, ttFont):
- from fontTools import agl
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- assert len(glyphOrder) == numGlyphs
- indices = array.array("H")
- for glyphID in glyphOrder:
- glyphID = glyphID.split('#')[0]
- if glyphID in agl.AGL2UV:
- indices.append(agl.AGL2UV[glyphID])
- elif len(glyphID) == 7 and glyphID[:3] == 'uni':
- indices.append(int(glyphID[3:],16))
- else:
- indices.append(0xFFFF)
- if sys.byteorder != "big": indices.byteswap()
- return indices.tobytes()
+ def encode_format_4_0(self, ttFont):
+ from fontTools import agl
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(postFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- if hasattr(self, "mapping"):
- writer.begintag("psNames")
- writer.newline()
- writer.comment("This file uses unique glyph names based on the information\n"
- "found in the 'post' table. Since these names might not be unique,\n"
- "we have to invent artificial names in case of clashes. In order to\n"
- "be able to retain the original information, we need a name to\n"
- "ps name mapping for those cases where they differ. That's what\n"
- "you see below.\n")
- writer.newline()
- items = sorted(self.mapping.items())
- for name, psName in items:
- writer.simpletag("psName", name=name, psName=psName)
- writer.newline()
- writer.endtag("psNames")
- writer.newline()
- if hasattr(self, "extraNames"):
- writer.begintag("extraNames")
- writer.newline()
- writer.comment("following are the name that are not taken from the standard Mac glyph order")
- writer.newline()
- for name in self.extraNames:
- writer.simpletag("psName", name=name)
- writer.newline()
- writer.endtag("extraNames")
- writer.newline()
- if hasattr(self, "data"):
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(self.data)
- writer.endtag("hexdata")
- writer.newline()
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ assert len(glyphOrder) == numGlyphs
+ indices = array.array("H")
+ for glyphID in glyphOrder:
+ glyphID = glyphID.split("#")[0]
+ if glyphID in agl.AGL2UV:
+ indices.append(agl.AGL2UV[glyphID])
+ elif len(glyphID) == 7 and glyphID[:3] == "uni":
+ indices.append(int(glyphID[3:], 16))
+ else:
+ indices.append(0xFFFF)
+ if sys.byteorder != "big":
+ indices.byteswap()
+ return indices.tobytes()
- def fromXML(self, name, attrs, content, ttFont):
- if name not in ("psNames", "extraNames", "hexdata"):
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "psNames":
- self.mapping = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "psName":
- self.mapping[attrs["name"]] = attrs["psName"]
- elif name == "extraNames":
- self.extraNames = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "psName":
- self.extraNames.append(attrs["name"])
- else:
- self.data = readHex(content)
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(postFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ if hasattr(self, "mapping"):
+ writer.begintag("psNames")
+ writer.newline()
+ writer.comment(
+ "This file uses unique glyph names based on the information\n"
+ "found in the 'post' table. Since these names might not be unique,\n"
+ "we have to invent artificial names in case of clashes. In order to\n"
+ "be able to retain the original information, we need a name to\n"
+ "ps name mapping for those cases where they differ. That's what\n"
+ "you see below.\n"
+ )
+ writer.newline()
+ items = sorted(self.mapping.items())
+ for name, psName in items:
+ writer.simpletag("psName", name=name, psName=psName)
+ writer.newline()
+ writer.endtag("psNames")
+ writer.newline()
+ if hasattr(self, "extraNames"):
+ writer.begintag("extraNames")
+ writer.newline()
+ writer.comment(
+ "following are the name that are not taken from the standard Mac glyph order"
+ )
+ writer.newline()
+ for name in self.extraNames:
+ writer.simpletag("psName", name=name)
+ writer.newline()
+ writer.endtag("extraNames")
+ writer.newline()
+ if hasattr(self, "data"):
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(self.data)
+ writer.endtag("hexdata")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name not in ("psNames", "extraNames", "hexdata"):
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "psNames":
+ self.mapping = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "psName":
+ self.mapping[attrs["name"]] = attrs["psName"]
+ elif name == "extraNames":
+ self.extraNames = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "psName":
+ self.extraNames.append(attrs["name"])
+ else:
+ self.data = readHex(content)
def unpackPStrings(data, n):
- # extract n Pascal strings from data.
- # if there is not enough data, use ""
+ # extract n Pascal strings from data.
+ # if there is not enough data, use ""
- strings = []
- index = 0
- dataLen = len(data)
+ strings = []
+ index = 0
+ dataLen = len(data)
- for _ in range(n):
- if dataLen <= index:
- length = 0
- else:
- length = byteord(data[index])
- index += 1
+ for _ in range(n):
+ if dataLen <= index:
+ length = 0
+ else:
+ length = byteord(data[index])
+ index += 1
- if dataLen <= index + length - 1:
- name = ""
- else:
- name = tostr(data[index:index+length], encoding="latin1")
- strings.append (name)
- index += length
+ if dataLen <= index + length - 1:
+ name = ""
+ else:
+ name = tostr(data[index : index + length], encoding="latin1")
+ strings.append(name)
+ index += length
- if index < dataLen:
- log.warning("%d extra bytes in post.stringData array", dataLen - index)
+ if index < dataLen:
+ log.warning("%d extra bytes in post.stringData array", dataLen - index)
- elif dataLen < index:
- log.warning("not enough data in post.stringData array")
+ elif dataLen < index:
+ log.warning("not enough data in post.stringData array")
- return strings
+ return strings
def packPStrings(strings):
- data = b""
- for s in strings:
- data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
- return data
+ data = b""
+ for s in strings:
+ data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
+ return data
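The two helpers above are symmetric; a small round-trip sketch (format 2.0 stores these strings for glyph indices 258 and up, while indices 0-257 refer to standardGlyphOrder):

    from fontTools.ttLib.tables._p_o_s_t import packPStrings, unpackPStrings

    names = ["alpha.alt", "beta.alt01"]
    blob = packPStrings(names)           # one length byte + Latin-1 bytes each
    assert unpackPStrings(blob, len(names)) == names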
diff --git a/Lib/fontTools/ttLib/tables/_p_r_e_p.py b/Lib/fontTools/ttLib/tables/_p_r_e_p.py
index 7f517fb8..b4b92f3e 100644
--- a/Lib/fontTools/ttLib/tables/_p_r_e_p.py
+++ b/Lib/fontTools/ttLib/tables/_p_r_e_p.py
@@ -2,5 +2,6 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("fpgm")
+
class table__p_r_e_p(superclass):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/_s_b_i_x.py b/Lib/fontTools/ttLib/tables/_s_b_i_x.py
index c4b2ad38..29b82c3e 100644
--- a/Lib/fontTools/ttLib/tables/_s_b_i_x.py
+++ b/Lib/fontTools/ttLib/tables/_s_b_i_x.py
@@ -28,88 +28,92 @@ sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat)
class table__s_b_i_x(DefaultTable.DefaultTable):
-
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.version = 1
- self.flags = 1
- self.numStrikes = 0
- self.strikes = {}
- self.strikeOffsets = []
-
- def decompile(self, data, ttFont):
- # read table header
- sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self)
- # collect offsets to individual strikes in self.strikeOffsets
- for i in range(self.numStrikes):
- current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
- offset_entry = sbixStrikeOffset()
- sstruct.unpack(sbixStrikeOffsetFormat, \
- data[current_offset:current_offset+sbixStrikeOffsetFormatSize], \
- offset_entry)
- self.strikeOffsets.append(offset_entry.strikeOffset)
-
- # decompile Strikes
- for i in range(self.numStrikes-1, -1, -1):
- current_strike = Strike(rawdata=data[self.strikeOffsets[i]:])
- data = data[:self.strikeOffsets[i]]
- current_strike.decompile(ttFont)
- #print " Strike length: %xh" % len(bitmapSetData)
- #print "Number of Glyph entries:", len(current_strike.glyphs)
- if current_strike.ppem in self.strikes:
- from fontTools import ttLib
- raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
- self.strikes[current_strike.ppem] = current_strike
-
- # after the glyph data records have been extracted, we don't need the offsets anymore
- del self.strikeOffsets
- del self.numStrikes
-
- def compile(self, ttFont):
- sbixData = b""
- self.numStrikes = len(self.strikes)
- sbixHeader = sstruct.pack(sbixHeaderFormat, self)
-
- # calculate offset to start of first strike
- setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes
-
- for si in sorted(self.strikes.keys()):
- current_strike = self.strikes[si]
- current_strike.compile(ttFont)
- # append offset to this strike to table header
- current_strike.strikeOffset = setOffset
- sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
- setOffset += len(current_strike.data)
- sbixData += current_strike.data
-
- return sbixHeader + sbixData
-
- def toXML(self, xmlWriter, ttFont):
- xmlWriter.simpletag("version", value=self.version)
- xmlWriter.newline()
- xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
- xmlWriter.newline()
- for i in sorted(self.strikes.keys()):
- self.strikes[i].toXML(xmlWriter, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name =="version":
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "flags":
- setattr(self, name, binary2num(attrs["value"]))
- elif name == "strike":
- current_strike = Strike()
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- current_strike.fromXML(name, attrs, content, ttFont)
- self.strikes[current_strike.ppem] = current_strike
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("can't handle '%s' element" % name)
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.version = 1
+ self.flags = 1
+ self.numStrikes = 0
+ self.strikes = {}
+ self.strikeOffsets = []
+
+ def decompile(self, data, ttFont):
+ # read table header
+ sstruct.unpack(sbixHeaderFormat, data[:sbixHeaderFormatSize], self)
+ # collect offsets to individual strikes in self.strikeOffsets
+ for i in range(self.numStrikes):
+ current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
+ offset_entry = sbixStrikeOffset()
+ sstruct.unpack(
+ sbixStrikeOffsetFormat,
+ data[current_offset : current_offset + sbixStrikeOffsetFormatSize],
+ offset_entry,
+ )
+ self.strikeOffsets.append(offset_entry.strikeOffset)
+
+ # decompile Strikes
+ for i in range(self.numStrikes - 1, -1, -1):
+ current_strike = Strike(rawdata=data[self.strikeOffsets[i] :])
+ data = data[: self.strikeOffsets[i]]
+ current_strike.decompile(ttFont)
+ # print " Strike length: %xh" % len(bitmapSetData)
+ # print "Number of Glyph entries:", len(current_strike.glyphs)
+ if current_strike.ppem in self.strikes:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
+ self.strikes[current_strike.ppem] = current_strike
+
+ # after the glyph data records have been extracted, we don't need the offsets anymore
+ del self.strikeOffsets
+ del self.numStrikes
+
+ def compile(self, ttFont):
+ sbixData = b""
+ self.numStrikes = len(self.strikes)
+ sbixHeader = sstruct.pack(sbixHeaderFormat, self)
+
+ # calculate offset to start of first strike
+ setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes
+
+ for si in sorted(self.strikes.keys()):
+ current_strike = self.strikes[si]
+ current_strike.compile(ttFont)
+ # append offset to this strike to table header
+ current_strike.strikeOffset = setOffset
+ sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
+ setOffset += len(current_strike.data)
+ sbixData += current_strike.data
+
+ return sbixHeader + sbixData
+
+ def toXML(self, xmlWriter, ttFont):
+ xmlWriter.simpletag("version", value=self.version)
+ xmlWriter.newline()
+ xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
+ xmlWriter.newline()
+ for i in sorted(self.strikes.keys()):
+ self.strikes[i].toXML(xmlWriter, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "flags":
+ setattr(self, name, binary2num(attrs["value"]))
+ elif name == "strike":
+ current_strike = Strike()
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ current_strike.fromXML(name, attrs, content, ttFont)
+ self.strikes[current_strike.ppem] = current_strike
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
# Helper classes
+
class sbixStrikeOffset(object):
- pass
+ pass
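After decompilation the table keys its strikes by ppem, as enforced above; a brief read-only sketch (the font path is hypothetical, and the per-strike 'glyphs' attribute is assumed from the commented-out debug prints):

    from fontTools.ttLib import TTFont

    font = TTFont("MyColorFont.ttf")  # hypothetical path to a font with 'sbix'
    for ppem, strike in sorted(font["sbix"].strikes.items()):
        print(ppem, len(strike.glyphs))  # glyph data records per strike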
diff --git a/Lib/fontTools/ttLib/tables/_t_r_a_k.py b/Lib/fontTools/ttLib/tables/_t_r_a_k.py
index 3052496f..0d1b313e 100644
--- a/Lib/fontTools/ttLib/tables/_t_r_a_k.py
+++ b/Lib/fontTools/ttLib/tables/_t_r_a_k.py
@@ -1,9 +1,9 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi,
- floatToFixedToStr as fl2str,
- strToFixedToFloat as str2fl,
+ fixedToFloat as fi2fl,
+ floatToFixed as fl2fi,
+ floatToFixedToStr as fl2str,
+ strToFixedToFloat as str2fl,
)
from fontTools.misc.textTools import bytesjoin, safeEval
from fontTools.ttLib import TTLibError
@@ -58,257 +58,268 @@ PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT)
class table__t_r_a_k(DefaultTable.DefaultTable):
- dependencies = ['name']
-
- def compile(self, ttFont):
- dataList = []
- offset = TRAK_HEADER_FORMAT_SIZE
- for direction in ('horiz', 'vert'):
- trackData = getattr(self, direction + 'Data', TrackData())
- offsetName = direction + 'Offset'
- # set offset to 0 if None or empty
- if not trackData:
- setattr(self, offsetName, 0)
- continue
- # TrackData table format must be longword aligned
- alignedOffset = (offset + 3) & ~3
- padding, offset = b"\x00"*(alignedOffset - offset), alignedOffset
- setattr(self, offsetName, offset)
-
- data = trackData.compile(offset)
- offset += len(data)
- dataList.append(padding + data)
-
- self.reserved = 0
- tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList)
- return tableData
-
- def decompile(self, data, ttFont):
- sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self)
- for direction in ('horiz', 'vert'):
- trackData = TrackData()
- offset = getattr(self, direction + 'Offset')
- if offset != 0:
- trackData.decompile(data, offset)
- setattr(self, direction + 'Data', trackData)
-
- def toXML(self, writer, ttFont):
- writer.simpletag('version', value=self.version)
- writer.newline()
- writer.simpletag('format', value=self.format)
- writer.newline()
- for direction in ('horiz', 'vert'):
- dataName = direction + 'Data'
- writer.begintag(dataName)
- writer.newline()
- trackData = getattr(self, dataName, TrackData())
- trackData.toXML(writer, ttFont)
- writer.endtag(dataName)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.version = safeEval(attrs['value'])
- elif name == 'format':
- self.format = safeEval(attrs['value'])
- elif name in ('horizData', 'vertData'):
- trackData = TrackData()
- setattr(self, name, trackData)
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content_ = element
- trackData.fromXML(name, attrs, content_, ttFont)
+ dependencies = ["name"]
+
+ def compile(self, ttFont):
+ dataList = []
+ offset = TRAK_HEADER_FORMAT_SIZE
+ for direction in ("horiz", "vert"):
+ trackData = getattr(self, direction + "Data", TrackData())
+ offsetName = direction + "Offset"
+ # set offset to 0 if None or empty
+ if not trackData:
+ setattr(self, offsetName, 0)
+ continue
+ # TrackData table format must be longword aligned
+ alignedOffset = (offset + 3) & ~3
+ padding, offset = b"\x00" * (alignedOffset - offset), alignedOffset
+ setattr(self, offsetName, offset)
+
+ data = trackData.compile(offset)
+ offset += len(data)
+ dataList.append(padding + data)
+
+ self.reserved = 0
+ tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList)
+ return tableData
+
+ def decompile(self, data, ttFont):
+ sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self)
+ for direction in ("horiz", "vert"):
+ trackData = TrackData()
+ offset = getattr(self, direction + "Offset")
+ if offset != 0:
+ trackData.decompile(data, offset)
+ setattr(self, direction + "Data", trackData)
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("format", value=self.format)
+ writer.newline()
+ for direction in ("horiz", "vert"):
+ dataName = direction + "Data"
+ writer.begintag(dataName)
+ writer.newline()
+ trackData = getattr(self, dataName, TrackData())
+ trackData.toXML(writer, ttFont)
+ writer.endtag(dataName)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ elif name == "format":
+ self.format = safeEval(attrs["value"])
+ elif name in ("horizData", "vertData"):
+ trackData = TrackData()
+ setattr(self, name, trackData)
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content_ = element
+ trackData.fromXML(name, attrs, content_, ttFont)
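A brief read-only sketch of the structures handled above: horizData and vertData are TrackData mappings from track value to a per-size entry (the font path is hypothetical, and treating an entry as a mapping of point size to tracking value is assumed from the entry[size] assignments in the decompile code below):

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")  # hypothetical path to a font with 'trak'
    for track, entry in sorted(font["trak"].horizData.items()):
        # nameIndex points into the 'name' table; the entry itself maps
        # point size -> tracking value
        print(track, entry.nameIndex, dict(entry))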
class TrackData(MutableMapping):
-
- def __init__(self, initialdata={}):
- self._map = dict(initialdata)
-
- def compile(self, offset):
- nTracks = len(self)
- sizes = self.sizes()
- nSizes = len(sizes)
-
- # offset to the start of the size subtable
- offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE*nTracks
- trackDataHeader = sstruct.pack(
- TRACK_DATA_FORMAT,
- {'nTracks': nTracks, 'nSizes': nSizes, 'sizeTableOffset': offset})
-
- entryDataList = []
- perSizeDataList = []
- # offset to per-size tracking values
- offset += SIZE_VALUE_FORMAT_SIZE*nSizes
- # sort track table entries by track value
- for track, entry in sorted(self.items()):
- assert entry.nameIndex is not None
- entry.track = track
- entry.offset = offset
- entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)]
- # sort per-size values by size
- for size, value in sorted(entry.items()):
- perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)]
- offset += PER_SIZE_VALUE_FORMAT_SIZE*nSizes
- # sort size values
- sizeDataList = [struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)]
-
- data = bytesjoin([trackDataHeader] + entryDataList + sizeDataList + perSizeDataList)
- return data
-
- def decompile(self, data, offset):
- # initial offset is from the start of trak table to the current TrackData
- trackDataHeader = data[offset:offset+TRACK_DATA_FORMAT_SIZE]
- if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile TrackData header')
- sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self)
- offset += TRACK_DATA_FORMAT_SIZE
-
- nSizes = self.nSizes
- sizeTableOffset = self.sizeTableOffset
- sizeTable = []
- for i in range(nSizes):
- sizeValueData = data[sizeTableOffset:sizeTableOffset+SIZE_VALUE_FORMAT_SIZE]
- if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile TrackData size subtable')
- sizeValue, = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData)
- sizeTable.append(fi2fl(sizeValue, 16))
- sizeTableOffset += SIZE_VALUE_FORMAT_SIZE
-
- for i in range(self.nTracks):
- entry = TrackTableEntry()
- entryData = data[offset:offset+TRACK_TABLE_ENTRY_FORMAT_SIZE]
- if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile TrackTableEntry record')
- sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry)
- perSizeOffset = entry.offset
- for j in range(nSizes):
- size = sizeTable[j]
- perSizeValueData = data[perSizeOffset:perSizeOffset+PER_SIZE_VALUE_FORMAT_SIZE]
- if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile per-size track values')
- perSizeValue, = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData)
- entry[size] = perSizeValue
- perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE
- self[entry.track] = entry
- offset += TRACK_TABLE_ENTRY_FORMAT_SIZE
-
- def toXML(self, writer, ttFont):
- nTracks = len(self)
- nSizes = len(self.sizes())
- writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes))
- writer.newline()
- for track, entry in sorted(self.items()):
- assert entry.nameIndex is not None
- entry.track = track
- entry.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name != 'trackEntry':
- return
- entry = TrackTableEntry()
- entry.fromXML(name, attrs, content, ttFont)
- self[entry.track] = entry
-
- def sizes(self):
- if not self:
- return frozenset()
- tracks = list(self.tracks())
- sizes = self[tracks.pop(0)].sizes()
- for track in tracks:
- entrySizes = self[track].sizes()
- if sizes != entrySizes:
- raise TTLibError(
- "'trak' table entries must specify the same sizes: "
- "%s != %s" % (sorted(sizes), sorted(entrySizes)))
- return frozenset(sizes)
-
- def __getitem__(self, track):
- return self._map[track]
-
- def __delitem__(self, track):
- del self._map[track]
-
- def __setitem__(self, track, entry):
- self._map[track] = entry
-
- def __len__(self):
- return len(self._map)
-
- def __iter__(self):
- return iter(self._map)
-
- def keys(self):
- return self._map.keys()
-
- tracks = keys
-
- def __repr__(self):
- return "TrackData({})".format(self._map if self else "")
+ def __init__(self, initialdata={}):
+ self._map = dict(initialdata)
+
+ def compile(self, offset):
+ nTracks = len(self)
+ sizes = self.sizes()
+ nSizes = len(sizes)
+
+ # offset to the start of the size subtable
+ offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE * nTracks
+ trackDataHeader = sstruct.pack(
+ TRACK_DATA_FORMAT,
+ {"nTracks": nTracks, "nSizes": nSizes, "sizeTableOffset": offset},
+ )
+
+ entryDataList = []
+ perSizeDataList = []
+ # offset to per-size tracking values
+ offset += SIZE_VALUE_FORMAT_SIZE * nSizes
+ # sort track table entries by track value
+ for track, entry in sorted(self.items()):
+ assert entry.nameIndex is not None
+ entry.track = track
+ entry.offset = offset
+ entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)]
+ # sort per-size values by size
+ for size, value in sorted(entry.items()):
+ perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)]
+ offset += PER_SIZE_VALUE_FORMAT_SIZE * nSizes
+ # sort size values
+ sizeDataList = [
+ struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)
+ ]
+
+ data = bytesjoin(
+ [trackDataHeader] + entryDataList + sizeDataList + perSizeDataList
+ )
+ return data
+
+ def decompile(self, data, offset):
+ # initial offset is from the start of trak table to the current TrackData
+ trackDataHeader = data[offset : offset + TRACK_DATA_FORMAT_SIZE]
+ if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE:
+ raise TTLibError("not enough data to decompile TrackData header")
+ sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self)
+ offset += TRACK_DATA_FORMAT_SIZE
+
+ nSizes = self.nSizes
+ sizeTableOffset = self.sizeTableOffset
+ sizeTable = []
+ for i in range(nSizes):
+ sizeValueData = data[
+ sizeTableOffset : sizeTableOffset + SIZE_VALUE_FORMAT_SIZE
+ ]
+ if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE:
+ raise TTLibError("not enough data to decompile TrackData size subtable")
+ (sizeValue,) = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData)
+ sizeTable.append(fi2fl(sizeValue, 16))
+ sizeTableOffset += SIZE_VALUE_FORMAT_SIZE
+
+ for i in range(self.nTracks):
+ entry = TrackTableEntry()
+ entryData = data[offset : offset + TRACK_TABLE_ENTRY_FORMAT_SIZE]
+ if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE:
+ raise TTLibError("not enough data to decompile TrackTableEntry record")
+ sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry)
+ perSizeOffset = entry.offset
+ for j in range(nSizes):
+ size = sizeTable[j]
+ perSizeValueData = data[
+ perSizeOffset : perSizeOffset + PER_SIZE_VALUE_FORMAT_SIZE
+ ]
+ if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE:
+ raise TTLibError(
+ "not enough data to decompile per-size track values"
+ )
+ (perSizeValue,) = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData)
+ entry[size] = perSizeValue
+ perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE
+ self[entry.track] = entry
+ offset += TRACK_TABLE_ENTRY_FORMAT_SIZE
+
+ def toXML(self, writer, ttFont):
+ nTracks = len(self)
+ nSizes = len(self.sizes())
+ writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes))
+ writer.newline()
+ for track, entry in sorted(self.items()):
+ assert entry.nameIndex is not None
+ entry.track = track
+ entry.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "trackEntry":
+ return
+ entry = TrackTableEntry()
+ entry.fromXML(name, attrs, content, ttFont)
+ self[entry.track] = entry
+
+ def sizes(self):
+ if not self:
+ return frozenset()
+ tracks = list(self.tracks())
+ sizes = self[tracks.pop(0)].sizes()
+ for track in tracks:
+ entrySizes = self[track].sizes()
+ if sizes != entrySizes:
+ raise TTLibError(
+ "'trak' table entries must specify the same sizes: "
+ "%s != %s" % (sorted(sizes), sorted(entrySizes))
+ )
+ return frozenset(sizes)
+
+ def __getitem__(self, track):
+ return self._map[track]
+
+ def __delitem__(self, track):
+ del self._map[track]
+
+ def __setitem__(self, track, entry):
+ self._map[track] = entry
+
+ def __len__(self):
+ return len(self._map)
+
+ def __iter__(self):
+ return iter(self._map)
+
+ def keys(self):
+ return self._map.keys()
+
+ tracks = keys
+
+ def __repr__(self):
+ return "TrackData({})".format(self._map if self else "")
class TrackTableEntry(MutableMapping):
-
- def __init__(self, values={}, nameIndex=None):
- self.nameIndex = nameIndex
- self._map = dict(values)
-
- def toXML(self, writer, ttFont):
- name = ttFont["name"].getDebugName(self.nameIndex)
- writer.begintag(
- "trackEntry",
- (('value', fl2str(self.track, 16)), ('nameIndex', self.nameIndex)))
- writer.newline()
- if name:
- writer.comment(name)
- writer.newline()
- for size, perSizeValue in sorted(self.items()):
- writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue)
- writer.newline()
- writer.endtag("trackEntry")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.track = str2fl(attrs['value'], 16)
- self.nameIndex = safeEval(attrs['nameIndex'])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, _ = element
- if name != 'track':
- continue
- size = str2fl(attrs['size'], 16)
- self[size] = safeEval(attrs['value'])
-
- def __getitem__(self, size):
- return self._map[size]
-
- def __delitem__(self, size):
- del self._map[size]
-
- def __setitem__(self, size, value):
- self._map[size] = value
-
- def __len__(self):
- return len(self._map)
-
- def __iter__(self):
- return iter(self._map)
-
- def keys(self):
- return self._map.keys()
-
- sizes = keys
-
- def __repr__(self):
- return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex)
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return NotImplemented
- return self.nameIndex == other.nameIndex and dict(self) == dict(other)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+ def __init__(self, values={}, nameIndex=None):
+ self.nameIndex = nameIndex
+ self._map = dict(values)
+
+ def toXML(self, writer, ttFont):
+ name = ttFont["name"].getDebugName(self.nameIndex)
+ writer.begintag(
+ "trackEntry",
+ (("value", fl2str(self.track, 16)), ("nameIndex", self.nameIndex)),
+ )
+ writer.newline()
+ if name:
+ writer.comment(name)
+ writer.newline()
+ for size, perSizeValue in sorted(self.items()):
+ writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue)
+ writer.newline()
+ writer.endtag("trackEntry")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.track = str2fl(attrs["value"], 16)
+ self.nameIndex = safeEval(attrs["nameIndex"])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, _ = element
+ if name != "track":
+ continue
+ size = str2fl(attrs["size"], 16)
+ self[size] = safeEval(attrs["value"])
+
+ def __getitem__(self, size):
+ return self._map[size]
+
+ def __delitem__(self, size):
+ del self._map[size]
+
+ def __setitem__(self, size, value):
+ self._map[size] = value
+
+ def __len__(self):
+ return len(self._map)
+
+ def __iter__(self):
+ return iter(self._map)
+
+ def keys(self):
+ return self._map.keys()
+
+ sizes = keys
+
+ def __repr__(self):
+ return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self.nameIndex == other.nameIndex and dict(self) == dict(other)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
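
The TrackData and TrackTableEntry classes above behave as nested mappings: track values map to entries, and each entry maps point sizes to per-size tracking amounts, with sizes() enforcing that every entry covers the same size set before compile() packs anything. A minimal sketch, assuming the classes are importable from fontTools.ttLib.tables._t_r_a_k and using made-up track/size numbers:

from fontTools.ttLib.tables._t_r_a_k import TrackData, TrackTableEntry

# Two entries covering the same point sizes; nameIndex points into the 'name'
# table and must be set before compile() is called.  All values are illustrative.
tight = TrackTableEntry({9.0: -10, 12.0: -5, 24.0: 0}, nameIndex=256)
loose = TrackTableEntry({9.0: 10, 12.0: 5, 24.0: 0}, nameIndex=257)

data = TrackData()
data[-1.0] = tight  # track -1.0
data[1.0] = loose   # track 1.0

assert data.sizes() == frozenset({9.0, 12.0, 24.0})
assert sorted(data.tracks()) == [-1.0, 1.0]

If the two entries disagreed on their size lists, sizes() -- and therefore compile(), which calls it -- would raise TTLibError, matching the check in the code above.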
diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
index 2bb24667..de7ce245 100644
--- a/Lib/fontTools/ttLib/tables/_v_h_e_a.py
+++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
@@ -1,7 +1,9 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
- ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
+ ensureVersionIsLong as fi2ve,
+ versionToFixed as ve2fi,
+)
from . import DefaultTable
import math
@@ -27,92 +29,99 @@ vheaFormat = """
numberOfVMetrics: H
"""
+
class table__v_h_e_a(DefaultTable.DefaultTable):
+ # Note: Keep in sync with table__h_h_e_a
+
+ dependencies = ["vmtx", "glyf", "CFF ", "CFF2"]
+
+ def decompile(self, data, ttFont):
+ sstruct.unpack(vheaFormat, data, self)
+
+ def compile(self, ttFont):
+ if ttFont.recalcBBoxes and (
+ ttFont.isLoaded("glyf")
+ or ttFont.isLoaded("CFF ")
+ or ttFont.isLoaded("CFF2")
+ ):
+ self.recalc(ttFont)
+ self.tableVersion = fi2ve(self.tableVersion)
+ return sstruct.pack(vheaFormat, self)
+
+ def recalc(self, ttFont):
+ if "vmtx" not in ttFont:
+ return
+
+ vmtxTable = ttFont["vmtx"]
+ self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())
+
+ boundsHeightDict = {}
+ if "glyf" in ttFont:
+ glyfTable = ttFont["glyf"]
+ for name in ttFont.getGlyphOrder():
+ g = glyfTable[name]
+ if g.numberOfContours == 0:
+ continue
+ if g.numberOfContours < 0 and not hasattr(g, "yMax"):
+ # Composite glyph without extents set.
+ # Calculate those.
+ g.recalcBounds(glyfTable)
+ boundsHeightDict[name] = g.yMax - g.yMin
+ elif "CFF " in ttFont or "CFF2" in ttFont:
+ if "CFF " in ttFont:
+ topDict = ttFont["CFF "].cff.topDictIndex[0]
+ else:
+ topDict = ttFont["CFF2"].cff.topDictIndex[0]
+ charStrings = topDict.CharStrings
+ for name in ttFont.getGlyphOrder():
+ cs = charStrings[name]
+ bounds = cs.calcBounds(charStrings)
+ if bounds is not None:
+ boundsHeightDict[name] = int(
+ math.ceil(bounds[3]) - math.floor(bounds[1])
+ )
+
+ if boundsHeightDict:
+ minTopSideBearing = float("inf")
+ minBottomSideBearing = float("inf")
+ yMaxExtent = -float("inf")
+ for name, boundsHeight in boundsHeightDict.items():
+ advanceHeight, tsb = vmtxTable[name]
+ bsb = advanceHeight - tsb - boundsHeight
+ extent = tsb + boundsHeight
+ minTopSideBearing = min(minTopSideBearing, tsb)
+ minBottomSideBearing = min(minBottomSideBearing, bsb)
+ yMaxExtent = max(yMaxExtent, extent)
+ self.minTopSideBearing = minTopSideBearing
+ self.minBottomSideBearing = minBottomSideBearing
+ self.yMaxExtent = yMaxExtent
+
+ else: # No glyph has outlines.
+ self.minTopSideBearing = 0
+ self.minBottomSideBearing = 0
+ self.yMaxExtent = 0
+
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(vheaFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name == "tableVersion":
+ value = fi2ve(value)
+ value = "0x%08x" % value
+ writer.simpletag(name, value=value)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableVersion":
+ setattr(self, name, ve2fi(attrs["value"]))
+ return
+ setattr(self, name, safeEval(attrs["value"]))
+
+ # reserved0 is caretOffset for legacy reasons
+ @property
+ def reserved0(self):
+ return self.caretOffset
- # Note: Keep in sync with table__h_h_e_a
-
- dependencies = ['vmtx', 'glyf', 'CFF ', 'CFF2']
-
- def decompile(self, data, ttFont):
- sstruct.unpack(vheaFormat, data, self)
-
- def compile(self, ttFont):
- if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ') or ttFont.isLoaded('CFF2')):
- self.recalc(ttFont)
- self.tableVersion = fi2ve(self.tableVersion)
- return sstruct.pack(vheaFormat, self)
-
- def recalc(self, ttFont):
- if 'vmtx' in ttFont:
- vmtxTable = ttFont['vmtx']
- self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())
-
- boundsHeightDict = {}
- if 'glyf' in ttFont:
- glyfTable = ttFont['glyf']
- for name in ttFont.getGlyphOrder():
- g = glyfTable[name]
- if g.numberOfContours == 0:
- continue
- if g.numberOfContours < 0 and not hasattr(g, "yMax"):
- # Composite glyph without extents set.
- # Calculate those.
- g.recalcBounds(glyfTable)
- boundsHeightDict[name] = g.yMax - g.yMin
- elif 'CFF ' in ttFont or 'CFF2' in ttFont:
- if 'CFF ' in ttFont:
- topDict = ttFont['CFF '].cff.topDictIndex[0]
- else:
- topDict = ttFont['CFF2'].cff.topDictIndex[0]
- charStrings = topDict.CharStrings
- for name in ttFont.getGlyphOrder():
- cs = charStrings[name]
- bounds = cs.calcBounds(charStrings)
- if bounds is not None:
- boundsHeightDict[name] = int(
- math.ceil(bounds[3]) - math.floor(bounds[1]))
-
- if boundsHeightDict:
- minTopSideBearing = float('inf')
- minBottomSideBearing = float('inf')
- yMaxExtent = -float('inf')
- for name, boundsHeight in boundsHeightDict.items():
- advanceHeight, tsb = vmtxTable[name]
- bsb = advanceHeight - tsb - boundsHeight
- extent = tsb + boundsHeight
- minTopSideBearing = min(minTopSideBearing, tsb)
- minBottomSideBearing = min(minBottomSideBearing, bsb)
- yMaxExtent = max(yMaxExtent, extent)
- self.minTopSideBearing = minTopSideBearing
- self.minBottomSideBearing = minBottomSideBearing
- self.yMaxExtent = yMaxExtent
-
- else: # No glyph has outlines.
- self.minTopSideBearing = 0
- self.minBottomSideBearing = 0
- self.yMaxExtent = 0
-
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(vheaFormat)
- for name in names:
- value = getattr(self, name)
- if name == "tableVersion":
- value = fi2ve(value)
- value = "0x%08x" % value
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableVersion":
- setattr(self, name, ve2fi(attrs["value"]))
- return
- setattr(self, name, safeEval(attrs["value"]))
-
- # reserved0 is caretOffset for legacy reasons
- @property
- def reserved0(self):
- return self.caretOffset
-
- @reserved0.setter
- def reserved0(self, value):
- self.caretOffset = value
+ @reserved0.setter
+ def reserved0(self, value):
+ self.caretOffset = value
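
recalc() above derives minTopSideBearing, minBottomSideBearing and yMaxExtent from each glyph's (advanceHeight, tsb) pair and its bounds height (yMax - yMin). A minimal sketch of the same arithmetic on plain dictionaries, with made-up glyph names and numbers:

def vhea_extremes(vmtx, bounds_heights):
    """vmtx: {glyph: (advanceHeight, tsb)}; bounds_heights: {glyph: yMax - yMin}."""
    min_tsb = float("inf")
    min_bsb = float("inf")
    y_max_extent = -float("inf")
    for name, bounds_height in bounds_heights.items():
        advance_height, tsb = vmtx[name]
        bsb = advance_height - tsb - bounds_height  # bottom side bearing
        extent = tsb + bounds_height                # same accumulation as yMaxExtent above
        min_tsb = min(min_tsb, tsb)
        min_bsb = min(min_bsb, bsb)
        y_max_extent = max(y_max_extent, extent)
    return min_tsb, min_bsb, y_max_extent

assert vhea_extremes(
    {"A": (1000, 120), "B": (1000, 100)},
    {"A": 700, "B": 820},
) == (100, 80, 920)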
diff --git a/Lib/fontTools/ttLib/tables/_v_m_t_x.py b/Lib/fontTools/ttLib/tables/_v_m_t_x.py
index fc818d83..a13304c3 100644
--- a/Lib/fontTools/ttLib/tables/_v_m_t_x.py
+++ b/Lib/fontTools/ttLib/tables/_v_m_t_x.py
@@ -2,9 +2,9 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("hmtx")
-class table__v_m_t_x(superclass):
- headerTag = 'vhea'
- advanceName = 'height'
- sideBearingName = 'tsb'
- numberOfMetricsName = 'numberOfVMetrics'
+class table__v_m_t_x(superclass):
+ headerTag = "vhea"
+ advanceName = "height"
+ sideBearingName = "tsb"
+ numberOfMetricsName = "numberOfVMetrics"
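
table__v_m_t_x above inherits the whole hmtx implementation and only renames the header tag and field names, so per-glyph records are (advanceHeight, tsb) pairs, which is exactly how recalc() in _v_h_e_a consumes them. A small check of that relationship through the public table registry:

from fontTools import ttLib

vmtx_class = ttLib.getTableClass("vmtx")
hmtx_class = ttLib.getTableClass("hmtx")

# vmtx is the hmtx machinery with renamed class attributes, nothing more.
assert issubclass(vmtx_class, hmtx_class)
assert vmtx_class.headerTag == "vhea"
assert (vmtx_class.advanceName, vmtx_class.sideBearingName) == ("height", "tsb")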
diff --git a/Lib/fontTools/ttLib/tables/asciiTable.py b/Lib/fontTools/ttLib/tables/asciiTable.py
index a97d92df..6f81c526 100644
--- a/Lib/fontTools/ttLib/tables/asciiTable.py
+++ b/Lib/fontTools/ttLib/tables/asciiTable.py
@@ -3,19 +3,18 @@ from . import DefaultTable
class asciiTable(DefaultTable.DefaultTable):
+ def toXML(self, writer, ttFont):
+ data = tostr(self.data)
+ # removing null bytes. XXX needed??
+ data = data.split("\0")
+ data = strjoin(data)
+ writer.begintag("source")
+ writer.newline()
+ writer.write_noindent(data)
+ writer.newline()
+ writer.endtag("source")
+ writer.newline()
- def toXML(self, writer, ttFont):
- data = tostr(self.data)
- # removing null bytes. XXX needed??
- data = data.split('\0')
- data = strjoin(data)
- writer.begintag("source")
- writer.newline()
- writer.write_noindent(data)
- writer.newline()
- writer.endtag("source")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- lines = strjoin(content).split("\n")
- self.data = tobytes("\n".join(lines[1:-1]))
+ def fromXML(self, name, attrs, content, ttFont):
+ lines = strjoin(content).split("\n")
+ self.data = tobytes("\n".join(lines[1:-1]))
diff --git a/Lib/fontTools/ttLib/tables/grUtils.py b/Lib/fontTools/ttLib/tables/grUtils.py
index a60df234..785684b1 100644
--- a/Lib/fontTools/ttLib/tables/grUtils.py
+++ b/Lib/fontTools/ttLib/tables/grUtils.py
@@ -1,4 +1,5 @@
import struct, warnings
+
try:
import lz4
except ImportError:
@@ -6,12 +7,13 @@ except ImportError:
else:
import lz4.block
-#old scheme for VERSION < 0.9 otherwise use lz4.block
+# old scheme for VERSION < 0.9, otherwise use lz4.block
+
def decompress(data):
(compression,) = struct.unpack(">L", data[4:8])
scheme = compression >> 27
- size = compression & 0x07ffffff
+ size = compression & 0x07FFFFFF
if scheme == 0:
pass
elif scheme == 1 and lz4:
@@ -24,23 +26,27 @@ def decompress(data):
warnings.warn("Table is compressed with an unsupported compression scheme")
return (data, scheme)
+
def compress(scheme, data):
- hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07ffffff))
- if scheme == 0 :
+ hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07FFFFFF))
+ if scheme == 0:
return data
elif scheme == 1 and lz4:
- res = lz4.block.compress(data, mode='high_compression', compression=16, store_size=False)
+ res = lz4.block.compress(
+ data, mode="high_compression", compression=16, store_size=False
+ )
return hdr + res
else:
warnings.warn("Table failed to compress by unsupported compression scheme")
return data
+
def _entries(attrs, sameval):
ak = 0
vals = []
lastv = 0
- for k,v in attrs:
- if len(vals) and (k != ak + 1 or (sameval and v != lastv)) :
+ for k, v in attrs:
+ if len(vals) and (k != ak + 1 or (sameval and v != lastv)):
yield (ak - len(vals) + 1, len(vals), vals)
vals = []
ak = k
@@ -48,14 +54,16 @@ def _entries(attrs, sameval):
lastv = v
yield (ak - len(vals) + 1, len(vals), vals)
-def entries(attributes, sameval = False):
- g = _entries(sorted(attributes.items(), key=lambda x:int(x[0])), sameval)
+
+def entries(attributes, sameval=False):
+ g = _entries(sorted(attributes.items(), key=lambda x: int(x[0])), sameval)
return g
+
def bininfo(num, size=1):
if num == 0:
return struct.pack(">4H", 0, 0, 0, 0)
- srange = 1;
+ srange = 1
select = 0
while srange <= num:
srange *= 2
@@ -66,16 +74,19 @@ def bininfo(num, size=1):
shift = num * size - srange
return struct.pack(">4H", num, srange, select, shift)
+
def num2tag(n):
if n < 0x200000:
return str(n)
else:
- return struct.unpack('4s', struct.pack('>L', n))[0].replace(b'\000', b'').decode()
+ return (
+ struct.unpack("4s", struct.pack(">L", n))[0].replace(b"\000", b"").decode()
+ )
+
def tag2num(n):
try:
return int(n)
except ValueError:
- n = (n+" ")[:4]
- return struct.unpack('>L', n.encode('ascii'))[0]
-
+ n = (n + " ")[:4]
+ return struct.unpack(">L", n.encode("ascii"))[0]
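
compress() and decompress() above agree on a single 32-bit big-endian header word at bytes 4..8: the compression scheme occupies the top 5 bits and the length the low 27 bits (mask 0x07FFFFFF). A minimal sketch of just that bit layout, with an arbitrary length value:

import struct

def pack_scheme_word(scheme, length):
    # top 5 bits = scheme, low 27 bits = length, mirroring compress() above
    return struct.pack(">L", (scheme << 27) | (length & 0x07FFFFFF))

def unpack_scheme_word(word):
    (compression,) = struct.unpack(">L", word)
    return compression >> 27, compression & 0x07FFFFFF

word = pack_scheme_word(1, 123456)  # scheme 1 selects lz4.block above
assert unpack_scheme_word(word) == (1, 123456)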
diff --git a/Lib/fontTools/ttLib/tables/otBase.py b/Lib/fontTools/ttLib/tables/otBase.py
index 1bd3198d..d565603b 100644
--- a/Lib/fontTools/ttLib/tables/otBase.py
+++ b/Lib/fontTools/ttLib/tables/otBase.py
@@ -13,1188 +13,1285 @@ log = logging.getLogger(__name__)
have_uharfbuzz = False
try:
- import uharfbuzz as hb
- # repack method added in uharfbuzz >= 0.23; if uharfbuzz *can* be
- # imported but repack method is missing, behave as if uharfbuzz
- # is not available (fallback to the slower Python implementation)
- have_uharfbuzz = callable(getattr(hb, "repack", None))
+ import uharfbuzz as hb
+
+ # repack method added in uharfbuzz >= 0.23; if uharfbuzz *can* be
+ # imported but repack method is missing, behave as if uharfbuzz
+ # is not available (fallback to the slower Python implementation)
+ have_uharfbuzz = callable(getattr(hb, "repack", None))
except ImportError:
- pass
+ pass
USE_HARFBUZZ_REPACKER = OPTIONS[f"{__name__}:USE_HARFBUZZ_REPACKER"]
+
class OverflowErrorRecord(object):
- def __init__(self, overflowTuple):
- self.tableType = overflowTuple[0]
- self.LookupListIndex = overflowTuple[1]
- self.SubTableIndex = overflowTuple[2]
- self.itemName = overflowTuple[3]
- self.itemIndex = overflowTuple[4]
+ def __init__(self, overflowTuple):
+ self.tableType = overflowTuple[0]
+ self.LookupListIndex = overflowTuple[1]
+ self.SubTableIndex = overflowTuple[2]
+ self.itemName = overflowTuple[3]
+ self.itemIndex = overflowTuple[4]
+
+ def __repr__(self):
+ return str(
+ (
+ self.tableType,
+ "LookupIndex:",
+ self.LookupListIndex,
+ "SubTableIndex:",
+ self.SubTableIndex,
+ "ItemName:",
+ self.itemName,
+ "ItemIndex:",
+ self.itemIndex,
+ )
+ )
- def __repr__(self):
- return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))
class OTLOffsetOverflowError(Exception):
- def __init__(self, overflowErrorRecord):
- self.value = overflowErrorRecord
+ def __init__(self, overflowErrorRecord):
+ self.value = overflowErrorRecord
+
+ def __str__(self):
+ return repr(self.value)
- def __str__(self):
- return repr(self.value)
class RepackerState(IntEnum):
- # Repacking control flow is implemnted using a state machine. The state machine table:
- #
- # State | Packing Success | Packing Failed | Exception Raised |
- # ------------+-----------------+----------------+------------------+
- # PURE_FT | Return result | PURE_FT | Return failure |
- # HB_FT | Return result | HB_FT | FT_FALLBACK |
- # FT_FALLBACK | HB_FT | FT_FALLBACK | Return failure |
+    # Repacking control flow is implemented using a state machine. The state machine table:
+ #
+ # State | Packing Success | Packing Failed | Exception Raised |
+ # ------------+-----------------+----------------+------------------+
+ # PURE_FT | Return result | PURE_FT | Return failure |
+ # HB_FT | Return result | HB_FT | FT_FALLBACK |
+ # FT_FALLBACK | HB_FT | FT_FALLBACK | Return failure |
+
+ # Pack only with fontTools, don't allow sharing between extensions.
+ PURE_FT = 1
- # Pack only with fontTools, don't allow sharing between extensions.
- PURE_FT = 1
+ # Attempt to pack with harfbuzz (allowing sharing between extensions)
+ # use fontTools to attempt overflow resolution.
+ HB_FT = 2
- # Attempt to pack with harfbuzz (allowing sharing between extensions)
- # use fontTools to attempt overflow resolution.
- HB_FT = 2
+ # Fallback if HB/FT packing gets stuck. Pack only with fontTools, don't allow sharing between
+ # extensions.
+ FT_FALLBACK = 3
- # Fallback if HB/FT packing gets stuck. Pack only with fontTools, don't allow sharing between
- # extensions.
- FT_FALLBACK = 3
class BaseTTXConverter(DefaultTable):
- """Generic base class for TTX table converters. It functions as an
- adapter between the TTX (ttLib actually) table model and the model
- we use for OpenType tables, which is necessarily subtly different.
- """
-
- def decompile(self, data, font):
- """Create an object from the binary data. Called automatically on access."""
- from . import otTables
- reader = OTTableReader(data, tableTag=self.tableTag)
- tableClass = getattr(otTables, self.tableTag)
- self.table = tableClass()
- self.table.decompile(reader, font)
-
- def compile(self, font):
- """Compiles the table into binary. Called automatically on save."""
-
- # General outline:
- # Create a top-level OTTableWriter for the GPOS/GSUB table.
- # Call the compile method for the the table
- # for each 'converter' record in the table converter list
- # call converter's write method for each item in the value.
- # - For simple items, the write method adds a string to the
- # writer's self.items list.
- # - For Struct/Table/Subtable items, it add first adds new writer to the
- # to the writer's self.items, then calls the item's compile method.
- # This creates a tree of writers, rooted at the GUSB/GPOS writer, with
- # each writer representing a table, and the writer.items list containing
- # the child data strings and writers.
- # call the getAllData method
- # call _doneWriting, which removes duplicates
- # call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables
- # Traverse the flat list of tables, calling getDataLength on each to update their position
- # Traverse the flat list of tables again, calling getData each get the data in the table, now that
- # pos's and offset are known.
-
- # If a lookup subtable overflows an offset, we have to start all over.
- overflowRecord = None
- # this is 3-state option: default (None) means automatically use hb.repack or
- # silently fall back if it fails; True, use it and raise error if not possible
- # or it errors out; False, don't use it, even if you can.
- use_hb_repack = font.cfg[USE_HARFBUZZ_REPACKER]
- if self.tableTag in ("GSUB", "GPOS"):
- if use_hb_repack is False:
- log.debug(
- "hb.repack disabled, compiling '%s' with pure-python serializer",
- self.tableTag,
- )
- elif not have_uharfbuzz:
- if use_hb_repack is True:
- raise ImportError("No module named 'uharfbuzz'")
- else:
- assert use_hb_repack is None
- log.debug(
- "uharfbuzz not found, compiling '%s' with pure-python serializer",
- self.tableTag,
- )
-
- if (use_hb_repack in (None, True)
- and have_uharfbuzz
- and self.tableTag in ("GSUB", "GPOS")):
- state = RepackerState.HB_FT
- else:
- state = RepackerState.PURE_FT
-
- hb_first_error_logged = False
- lastOverflowRecord = None
- while True:
- try:
- writer = OTTableWriter(tableTag=self.tableTag)
- self.table.compile(writer, font)
- if state == RepackerState.HB_FT:
- return self.tryPackingHarfbuzz(writer, hb_first_error_logged)
- elif state == RepackerState.PURE_FT:
- return self.tryPackingFontTools(writer)
- elif state == RepackerState.FT_FALLBACK:
- # Run packing with FontTools only, but don't return the result as it will
- # not be optimally packed. Once a successful packing has been found, state is
- # changed back to harfbuzz packing to produce the final, optimal, packing.
- self.tryPackingFontTools(writer)
- log.debug("Re-enabling sharing between extensions and switching back to "
- "harfbuzz+fontTools packing.")
- state = RepackerState.HB_FT
-
- except OTLOffsetOverflowError as e:
- hb_first_error_logged = True
- ok = self.tryResolveOverflow(font, e, lastOverflowRecord)
- lastOverflowRecord = e.value
-
- if ok:
- continue
-
- if state is RepackerState.HB_FT:
- log.debug("Harfbuzz packing out of resolutions, disabling sharing between extensions and "
- "switching to fontTools only packing.")
- state = RepackerState.FT_FALLBACK
- else:
- raise
-
- def tryPackingHarfbuzz(self, writer, hb_first_error_logged):
- try:
- log.debug("serializing '%s' with hb.repack", self.tableTag)
- return writer.getAllDataUsingHarfbuzz(self.tableTag)
- except (ValueError, MemoryError, hb.RepackerError) as e:
- # Only log hb repacker errors the first time they occur in
- # the offset-overflow resolution loop, they are just noisy.
- # Maybe we can revisit this if/when uharfbuzz actually gives
- # us more info as to why hb.repack failed...
- if not hb_first_error_logged:
- error_msg = f"{type(e).__name__}"
- if str(e) != "":
- error_msg += f": {e}"
- log.warning(
- "hb.repack failed to serialize '%s', attempting fonttools resolutions "
- "; the error message was: %s",
- self.tableTag,
- error_msg,
- )
- hb_first_error_logged = True
- return writer.getAllData(remove_duplicate=False)
-
-
- def tryPackingFontTools(self, writer):
- return writer.getAllData()
-
-
- def tryResolveOverflow(self, font, e, lastOverflowRecord):
- ok = 0
- if lastOverflowRecord == e.value:
- # Oh well...
- return ok
-
- overflowRecord = e.value
- log.info("Attempting to fix OTLOffsetOverflowError %s", e)
-
- if overflowRecord.itemName is None:
- from .otTables import fixLookupOverFlows
- ok = fixLookupOverFlows(font, overflowRecord)
- else:
- from .otTables import fixSubTableOverFlows
- ok = fixSubTableOverFlows(font, overflowRecord)
-
- if ok:
- return ok
-
- # Try upgrading lookup to Extension and hope
- # that cross-lookup sharing not happening would
- # fix overflow...
- from .otTables import fixLookupOverFlows
- return fixLookupOverFlows(font, overflowRecord)
-
- def toXML(self, writer, font):
- self.table.toXML2(writer, font)
-
- def fromXML(self, name, attrs, content, font):
- from . import otTables
- if not hasattr(self, "table"):
- tableClass = getattr(otTables, self.tableTag)
- self.table = tableClass()
- self.table.fromXML(name, attrs, content, font)
- self.table.populateDefaults()
-
- def ensureDecompiled(self, recurse=True):
- self.table.ensureDecompiled(recurse=recurse)
+ """Generic base class for TTX table converters. It functions as an
+ adapter between the TTX (ttLib actually) table model and the model
+ we use for OpenType tables, which is necessarily subtly different.
+ """
+
+ def decompile(self, data, font):
+ """Create an object from the binary data. Called automatically on access."""
+ from . import otTables
+
+ reader = OTTableReader(data, tableTag=self.tableTag)
+ tableClass = getattr(otTables, self.tableTag)
+ self.table = tableClass()
+ self.table.decompile(reader, font)
+
+ def compile(self, font):
+ """Compiles the table into binary. Called automatically on save."""
+
+ # General outline:
+ # Create a top-level OTTableWriter for the GPOS/GSUB table.
+        # Call the compile method for the table
+ # for each 'converter' record in the table converter list
+ # call converter's write method for each item in the value.
+ # - For simple items, the write method adds a string to the
+ # writer's self.items list.
+        # - For Struct/Table/Subtable items, it first adds a new writer to
+        # the writer's self.items, then calls the item's compile method.
+        # This creates a tree of writers, rooted at the GSUB/GPOS writer, with
+ # each writer representing a table, and the writer.items list containing
+ # the child data strings and writers.
+ # call the getAllData method
+ # call _doneWriting, which removes duplicates
+        # call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
+ # Traverse the flat list of tables, calling getDataLength on each to update their position
+        # Traverse the flat list of tables again, calling getData on each to get the table's data,
+        # now that positions and offsets are known.
+
+ # If a lookup subtable overflows an offset, we have to start all over.
+ overflowRecord = None
+        # This is a 3-state option: default (None) means automatically use hb.repack,
+        # silently falling back if it fails; True means use it and raise an error if it is
+        # not available or it errors out; False means don't use it, even if it is available.
+ use_hb_repack = font.cfg[USE_HARFBUZZ_REPACKER]
+ if self.tableTag in ("GSUB", "GPOS"):
+ if use_hb_repack is False:
+ log.debug(
+ "hb.repack disabled, compiling '%s' with pure-python serializer",
+ self.tableTag,
+ )
+ elif not have_uharfbuzz:
+ if use_hb_repack is True:
+ raise ImportError("No module named 'uharfbuzz'")
+ else:
+ assert use_hb_repack is None
+ log.debug(
+ "uharfbuzz not found, compiling '%s' with pure-python serializer",
+ self.tableTag,
+ )
+
+ if (
+ use_hb_repack in (None, True)
+ and have_uharfbuzz
+ and self.tableTag in ("GSUB", "GPOS")
+ ):
+ state = RepackerState.HB_FT
+ else:
+ state = RepackerState.PURE_FT
+
+ hb_first_error_logged = False
+ lastOverflowRecord = None
+ while True:
+ try:
+ writer = OTTableWriter(tableTag=self.tableTag)
+ self.table.compile(writer, font)
+ if state == RepackerState.HB_FT:
+ return self.tryPackingHarfbuzz(writer, hb_first_error_logged)
+ elif state == RepackerState.PURE_FT:
+ return self.tryPackingFontTools(writer)
+ elif state == RepackerState.FT_FALLBACK:
+ # Run packing with FontTools only, but don't return the result as it will
+ # not be optimally packed. Once a successful packing has been found, state is
+ # changed back to harfbuzz packing to produce the final, optimal, packing.
+ self.tryPackingFontTools(writer)
+ log.debug(
+ "Re-enabling sharing between extensions and switching back to "
+ "harfbuzz+fontTools packing."
+ )
+ state = RepackerState.HB_FT
+
+ except OTLOffsetOverflowError as e:
+ hb_first_error_logged = True
+ ok = self.tryResolveOverflow(font, e, lastOverflowRecord)
+ lastOverflowRecord = e.value
+
+ if ok:
+ continue
+
+ if state is RepackerState.HB_FT:
+ log.debug(
+ "Harfbuzz packing out of resolutions, disabling sharing between extensions and "
+ "switching to fontTools only packing."
+ )
+ state = RepackerState.FT_FALLBACK
+ else:
+ raise
+
+ def tryPackingHarfbuzz(self, writer, hb_first_error_logged):
+ try:
+ log.debug("serializing '%s' with hb.repack", self.tableTag)
+ return writer.getAllDataUsingHarfbuzz(self.tableTag)
+ except (ValueError, MemoryError, hb.RepackerError) as e:
+ # Only log hb repacker errors the first time they occur in
+ # the offset-overflow resolution loop, they are just noisy.
+ # Maybe we can revisit this if/when uharfbuzz actually gives
+ # us more info as to why hb.repack failed...
+ if not hb_first_error_logged:
+ error_msg = f"{type(e).__name__}"
+ if str(e) != "":
+ error_msg += f": {e}"
+ log.warning(
+ "hb.repack failed to serialize '%s', attempting fonttools resolutions "
+ "; the error message was: %s",
+ self.tableTag,
+ error_msg,
+ )
+ hb_first_error_logged = True
+ return writer.getAllData(remove_duplicate=False)
+
+ def tryPackingFontTools(self, writer):
+ return writer.getAllData()
+
+ def tryResolveOverflow(self, font, e, lastOverflowRecord):
+ ok = 0
+ if lastOverflowRecord == e.value:
+ # Oh well...
+ return ok
+
+ overflowRecord = e.value
+ log.info("Attempting to fix OTLOffsetOverflowError %s", e)
+
+ if overflowRecord.itemName is None:
+ from .otTables import fixLookupOverFlows
+
+ ok = fixLookupOverFlows(font, overflowRecord)
+ else:
+ from .otTables import fixSubTableOverFlows
+
+ ok = fixSubTableOverFlows(font, overflowRecord)
+
+ if ok:
+ return ok
+
+ # Try upgrading lookup to Extension and hope
+ # that cross-lookup sharing not happening would
+ # fix overflow...
+ from .otTables import fixLookupOverFlows
+
+ return fixLookupOverFlows(font, overflowRecord)
+
+ def toXML(self, writer, font):
+ self.table.toXML2(writer, font)
+
+ def fromXML(self, name, attrs, content, font):
+ from . import otTables
+
+ if not hasattr(self, "table"):
+ tableClass = getattr(otTables, self.tableTag)
+ self.table = tableClass()
+ self.table.fromXML(name, attrs, content, font)
+ self.table.populateDefaults()
+
+ def ensureDecompiled(self, recurse=True):
+ self.table.ensureDecompiled(recurse=recurse)
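
compile() above drives the RepackerState table: HB_FT returns the harfbuzz result, drops to FT_FALLBACK once overflow resolutions are exhausted, and FT_FALLBACK flips back to HB_FT after a fontTools-only pack succeeds. A stripped-down sketch of just that control flow; pack_with_harfbuzz, pack_with_fonttools, resolve_overflow and the Overflow exception are hypothetical stand-ins, not fontTools API:

from enum import IntEnum

class State(IntEnum):
    PURE_FT = 1      # fontTools only
    HB_FT = 2        # harfbuzz first, fontTools for overflow resolution
    FT_FALLBACK = 3  # fontTools pass, then retry harfbuzz

class Overflow(Exception):
    """Stand-in for OTLOffsetOverflowError."""

def pack(table, state, pack_with_harfbuzz, pack_with_fonttools, resolve_overflow):
    while True:
        try:
            if state is State.HB_FT:
                return pack_with_harfbuzz(table)
            elif state is State.PURE_FT:
                return pack_with_fonttools(table)
            else:  # FT_FALLBACK: prove a packing exists, then retry harfbuzz
                pack_with_fonttools(table)
                state = State.HB_FT
        except Overflow as e:
            if resolve_overflow(table, e):
                continue  # a resolution was applied; recompile and retry
            if state is State.HB_FT:
                state = State.FT_FALLBACK  # disable sharing, fontTools only
            else:
                raise  # PURE_FT and FT_FALLBACK give up here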
# https://github.com/fonttools/fonttools/pull/2285#issuecomment-834652928
-assert len(struct.pack('i', 0)) == 4
-assert array.array('i').itemsize == 4, "Oops, file a bug against fonttools."
+assert len(struct.pack("i", 0)) == 4
+assert array.array("i").itemsize == 4, "Oops, file a bug against fonttools."
+
class OTTableReader(object):
- """Helper class to retrieve data from an OpenType table."""
-
- __slots__ = ('data', 'offset', 'pos', 'localState', 'tableTag')
-
- def __init__(self, data, localState=None, offset=0, tableTag=None):
- self.data = data
- self.offset = offset
- self.pos = offset
- self.localState = localState
- self.tableTag = tableTag
-
- def advance(self, count):
- self.pos += count
-
- def seek(self, pos):
- self.pos = pos
-
- def copy(self):
- other = self.__class__(self.data, self.localState, self.offset, self.tableTag)
- other.pos = self.pos
- return other
-
- def getSubReader(self, offset):
- offset = self.offset + offset
- return self.__class__(self.data, self.localState, offset, self.tableTag)
-
- def readValue(self, typecode, staticSize):
- pos = self.pos
- newpos = pos + staticSize
- value, = struct.unpack(f">{typecode}", self.data[pos:newpos])
- self.pos = newpos
- return value
- def readArray(self, typecode, staticSize, count):
- pos = self.pos
- newpos = pos + count * staticSize
- value = array.array(typecode, self.data[pos:newpos])
- if sys.byteorder != "big": value.byteswap()
- self.pos = newpos
- return value.tolist()
-
- def readInt8(self):
- return self.readValue("b", staticSize=1)
- def readInt8Array(self, count):
- return self.readArray("b", staticSize=1, count=count)
-
- def readShort(self):
- return self.readValue("h", staticSize=2)
- def readShortArray(self, count):
- return self.readArray("h", staticSize=2, count=count)
-
- def readLong(self):
- return self.readValue("i", staticSize=4)
- def readLongArray(self, count):
- return self.readArray("i", staticSize=4, count=count)
-
- def readUInt8(self):
- return self.readValue("B", staticSize=1)
- def readUInt8Array(self, count):
- return self.readArray("B", staticSize=1, count=count)
-
- def readUShort(self):
- return self.readValue("H", staticSize=2)
- def readUShortArray(self, count):
- return self.readArray("H", staticSize=2, count=count)
-
- def readULong(self):
- return self.readValue("I", staticSize=4)
- def readULongArray(self, count):
- return self.readArray("I", staticSize=4, count=count)
-
- def readUInt24(self):
- pos = self.pos
- newpos = pos + 3
- value, = struct.unpack(">l", b'\0'+self.data[pos:newpos])
- self.pos = newpos
- return value
- def readUInt24Array(self, count):
- return [self.readUInt24() for _ in range(count)]
-
- def readTag(self):
- pos = self.pos
- newpos = pos + 4
- value = Tag(self.data[pos:newpos])
- assert len(value) == 4, value
- self.pos = newpos
- return value
-
- def readData(self, count):
- pos = self.pos
- newpos = pos + count
- value = self.data[pos:newpos]
- self.pos = newpos
- return value
-
- def __setitem__(self, name, value):
- state = self.localState.copy() if self.localState else dict()
- state[name] = value
- self.localState = state
-
- def __getitem__(self, name):
- return self.localState and self.localState[name]
-
- def __contains__(self, name):
- return self.localState and name in self.localState
+ """Helper class to retrieve data from an OpenType table."""
+
+ __slots__ = ("data", "offset", "pos", "localState", "tableTag")
+
+ def __init__(self, data, localState=None, offset=0, tableTag=None):
+ self.data = data
+ self.offset = offset
+ self.pos = offset
+ self.localState = localState
+ self.tableTag = tableTag
+
+ def advance(self, count):
+ self.pos += count
+
+ def seek(self, pos):
+ self.pos = pos
+
+ def copy(self):
+ other = self.__class__(self.data, self.localState, self.offset, self.tableTag)
+ other.pos = self.pos
+ return other
+
+ def getSubReader(self, offset):
+ offset = self.offset + offset
+ return self.__class__(self.data, self.localState, offset, self.tableTag)
+
+ def readValue(self, typecode, staticSize):
+ pos = self.pos
+ newpos = pos + staticSize
+ (value,) = struct.unpack(f">{typecode}", self.data[pos:newpos])
+ self.pos = newpos
+ return value
+
+ def readArray(self, typecode, staticSize, count):
+ pos = self.pos
+ newpos = pos + count * staticSize
+ value = array.array(typecode, self.data[pos:newpos])
+ if sys.byteorder != "big":
+ value.byteswap()
+ self.pos = newpos
+ return value.tolist()
+
+ def readInt8(self):
+ return self.readValue("b", staticSize=1)
+
+ def readInt8Array(self, count):
+ return self.readArray("b", staticSize=1, count=count)
+
+ def readShort(self):
+ return self.readValue("h", staticSize=2)
+
+ def readShortArray(self, count):
+ return self.readArray("h", staticSize=2, count=count)
+
+ def readLong(self):
+ return self.readValue("i", staticSize=4)
+
+ def readLongArray(self, count):
+ return self.readArray("i", staticSize=4, count=count)
+
+ def readUInt8(self):
+ return self.readValue("B", staticSize=1)
+
+ def readUInt8Array(self, count):
+ return self.readArray("B", staticSize=1, count=count)
+
+ def readUShort(self):
+ return self.readValue("H", staticSize=2)
+
+ def readUShortArray(self, count):
+ return self.readArray("H", staticSize=2, count=count)
+
+ def readULong(self):
+ return self.readValue("I", staticSize=4)
+
+ def readULongArray(self, count):
+ return self.readArray("I", staticSize=4, count=count)
+
+ def readUInt24(self):
+ pos = self.pos
+ newpos = pos + 3
+ (value,) = struct.unpack(">l", b"\0" + self.data[pos:newpos])
+ self.pos = newpos
+ return value
+
+ def readUInt24Array(self, count):
+ return [self.readUInt24() for _ in range(count)]
+
+ def readTag(self):
+ pos = self.pos
+ newpos = pos + 4
+ value = Tag(self.data[pos:newpos])
+ assert len(value) == 4, value
+ self.pos = newpos
+ return value
+
+ def readData(self, count):
+ pos = self.pos
+ newpos = pos + count
+ value = self.data[pos:newpos]
+ self.pos = newpos
+ return value
+
+ def __setitem__(self, name, value):
+ state = self.localState.copy() if self.localState else dict()
+ state[name] = value
+ self.localState = state
+
+ def __getitem__(self, name):
+ return self.localState and self.localState[name]
+
+ def __contains__(self, name):
+ return self.localState and name in self.localState
+
+
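
OTTableReader.readUInt24() above widens the 3 stored bytes to 4 by prefixing a zero byte before unpacking big-endian. The same trick in isolation, with arbitrary sample bytes:

import struct

def read_uint24(data, pos):
    # prefix a zero byte so the 3-byte field unpacks as a 4-byte big-endian int
    (value,) = struct.unpack(">l", b"\0" + data[pos:pos + 3])
    return value, pos + 3

assert read_uint24(b"\x01\x02\x03", 0) == (0x010203, 3)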
+class OffsetToWriter(object):
+ def __init__(self, subWriter, offsetSize):
+ self.subWriter = subWriter
+ self.offsetSize = offsetSize
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.subWriter == other.subWriter and self.offsetSize == other.offsetSize
+
+ def __hash__(self):
+ # only works after self._doneWriting() has been called
+ return hash((self.subWriter, self.offsetSize))
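
The new OffsetToWriter pairs a subwriter with an offsetSize of 2, 3 or 4 bytes; getData() below then packs subWriter.pos - pos with the matching width and turns a 2-byte overflow into OTLOffsetOverflowError. A minimal sketch of the width-dependent packing, using plain struct in place of fontTools' packUShort/packUInt24/packULong helpers:

import struct

def pack_offset(value, offset_size):
    if offset_size == 4:
        return struct.pack(">I", value)
    elif offset_size == 3:
        return struct.pack(">L", value)[1:]  # keep the low 3 bytes
    elif offset_size == 2:
        if not 0 <= value < 0x10000:
            # getData() maps this case to OTLOffsetOverflowError
            raise OverflowError("offset %d does not fit in 2 bytes" % value)
        return struct.pack(">H", value)
    raise ValueError(offset_size)

assert pack_offset(0x1234, 2) == b"\x12\x34"
assert pack_offset(0x123456, 3) == b"\x12\x34\x56"
assert pack_offset(0x12345678, 4) == b"\x12\x34\x56\x78"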
class OTTableWriter(object):
- """Helper class to gather and assemble data for OpenType tables."""
-
- def __init__(self, localState=None, tableTag=None, offsetSize=2):
- self.items = []
- self.pos = None
- self.localState = localState
- self.tableTag = tableTag
- self.offsetSize = offsetSize
- self.parent = None
-
- # DEPRECATED: 'longOffset' is kept as a property for backward compat with old code.
- # You should use 'offsetSize' instead (2, 3 or 4 bytes).
- @property
- def longOffset(self):
- return self.offsetSize == 4
-
- @longOffset.setter
- def longOffset(self, value):
- self.offsetSize = 4 if value else 2
-
- def __setitem__(self, name, value):
- state = self.localState.copy() if self.localState else dict()
- state[name] = value
- self.localState = state
-
- def __getitem__(self, name):
- return self.localState[name]
-
- def __delitem__(self, name):
- del self.localState[name]
-
- # assembler interface
-
- def getDataLength(self):
- """Return the length of this table in bytes, without subtables."""
- l = 0
- for item in self.items:
- if hasattr(item, "getCountData"):
- l += item.size
- elif hasattr(item, "getData"):
- l += item.offsetSize
- else:
- l = l + len(item)
- return l
-
- def getData(self):
- """Assemble the data for this writer/table, without subtables."""
- items = list(self.items) # make a shallow copy
- pos = self.pos
- numItems = len(items)
- for i in range(numItems):
- item = items[i]
-
- if hasattr(item, "getData"):
- if item.offsetSize == 4:
- items[i] = packULong(item.pos - pos)
- elif item.offsetSize == 2:
- try:
- items[i] = packUShort(item.pos - pos)
- except struct.error:
- # provide data to fix overflow problem.
- overflowErrorRecord = self.getOverflowErrorRecord(item)
-
- raise OTLOffsetOverflowError(overflowErrorRecord)
- elif item.offsetSize == 3:
- items[i] = packUInt24(item.pos - pos)
- else:
- raise ValueError(item.offsetSize)
-
- return bytesjoin(items)
-
- def getDataForHarfbuzz(self):
- """Assemble the data for this writer/table with all offset field set to 0"""
- items = list(self.items)
- packFuncs = {2: packUShort, 3: packUInt24, 4: packULong}
- for i, item in enumerate(items):
- if hasattr(item, "getData"):
- # Offset value is not needed in harfbuzz repacker, so setting offset to 0 to avoid overflow here
- if item.offsetSize in packFuncs:
- items[i] = packFuncs[item.offsetSize](0)
- else:
- raise ValueError(item.offsetSize)
-
- return bytesjoin(items)
-
- def __hash__(self):
- # only works after self._doneWriting() has been called
- return hash(self.items)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.offsetSize == other.offsetSize and self.items == other.items
-
- def _doneWriting(self, internedTables, shareExtension=False):
- # Convert CountData references to data string items
- # collapse duplicate table references to a unique entry
- # "tables" are OTTableWriter objects.
-
- # For Extension Lookup types, we can
- # eliminate duplicates only within the tree under the Extension Lookup,
- # as offsets may exceed 64K even between Extension LookupTable subtables.
- isExtension = hasattr(self, "Extension")
-
- # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level
- # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly
- # empty, array. So, we don't share those.
- # See: https://github.com/fonttools/fonttools/issues/518
- dontShare = hasattr(self, 'DontShare')
-
- if isExtension and not shareExtension:
- internedTables = {}
-
- items = self.items
- for i in range(len(items)):
- item = items[i]
- if hasattr(item, "getCountData"):
- items[i] = item.getCountData()
- elif hasattr(item, "getData"):
- item._doneWriting(internedTables, shareExtension=shareExtension)
- # At this point, all subwriters are hashable based on their items.
- # (See hash and comparison magic methods above.) So the ``setdefault``
- # call here will return the first writer object we've seen with
- # equal content, or store it in the dictionary if it's not been
- # seen yet. We therefore replace the subwriter object with an equivalent
- # object, which deduplicates the tree.
- if not dontShare:
- items[i] = item = internedTables.setdefault(item, item)
- self.items = tuple(items)
-
- def _gatherTables(self, tables, extTables, done):
- # Convert table references in self.items tree to a flat
- # list of tables in depth-first traversal order.
- # "tables" are OTTableWriter objects.
- # We do the traversal in reverse order at each level, in order to
- # resolve duplicate references to be the last reference in the list of tables.
- # For extension lookups, duplicate references can be merged only within the
- # writer tree under the extension lookup.
-
- done[id(self)] = True
-
- numItems = len(self.items)
- iRange = list(range(numItems))
- iRange.reverse()
-
- isExtension = hasattr(self, "Extension")
-
- selfTables = tables
-
- if isExtension:
- assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables"
- tables, extTables, done = extTables, None, {}
-
- # add Coverage table if it is sorted last.
- sortCoverageLast = False
- if hasattr(self, "sortCoverageLast"):
- # Find coverage table
- for i in range(numItems):
- item = self.items[i]
- if getattr(item, 'name', None) == "Coverage":
- sortCoverageLast = True
- break
- if id(item) not in done:
- item._gatherTables(tables, extTables, done)
- else:
- # We're a new parent of item
- pass
-
- for i in iRange:
- item = self.items[i]
- if not hasattr(item, "getData"):
- continue
-
- if sortCoverageLast and (i==1) and getattr(item, 'name', None) == 'Coverage':
- # we've already 'gathered' it above
- continue
-
- if id(item) not in done:
- item._gatherTables(tables, extTables, done)
- else:
- # Item is already written out by other parent
- pass
-
- selfTables.append(self)
-
- def _gatherGraphForHarfbuzz(self, tables, obj_list, done, objidx, virtual_edges):
- real_links = []
- virtual_links = []
- item_idx = objidx
-
- # Merge virtual_links from parent
- for idx in virtual_edges:
- virtual_links.append((0, 0, idx))
-
- sortCoverageLast = False
- coverage_idx = 0
- if hasattr(self, "sortCoverageLast"):
- # Find coverage table
- for i, item in enumerate(self.items):
- if getattr(item, 'name', None) == "Coverage":
- sortCoverageLast = True
- if id(item) not in done:
- coverage_idx = item_idx = item._gatherGraphForHarfbuzz(tables, obj_list, done, item_idx, virtual_edges)
- else:
- coverage_idx = done[id(item)]
- virtual_edges.append(coverage_idx)
- break
-
- child_idx = 0
- offset_pos = 0
- for i, item in enumerate(self.items):
- if hasattr(item, "getData"):
- pos = offset_pos
- elif hasattr(item, "getCountData"):
- offset_pos += item.size
- continue
- else:
- offset_pos = offset_pos + len(item)
- continue
-
- if id(item) not in done:
- child_idx = item_idx = item._gatherGraphForHarfbuzz(tables, obj_list, done, item_idx, virtual_edges)
- else:
- child_idx = done[id(item)]
-
- real_edge = (pos, item.offsetSize, child_idx)
- real_links.append(real_edge)
- offset_pos += item.offsetSize
-
- tables.append(self)
- obj_list.append((real_links,virtual_links))
- item_idx += 1
- done[id(self)] = item_idx
- if sortCoverageLast:
- virtual_edges.pop()
-
- return item_idx
-
- def getAllDataUsingHarfbuzz(self, tableTag):
- """The Whole table is represented as a Graph.
- Assemble graph data and call Harfbuzz repacker to pack the table.
- Harfbuzz repacker is faster and retain as much sub-table sharing as possible, see also:
- https://github.com/harfbuzz/harfbuzz/blob/main/docs/repacker.md
- The input format for hb.repack() method is explained here:
- https://github.com/harfbuzz/uharfbuzz/blob/main/src/uharfbuzz/_harfbuzz.pyx#L1149
- """
- internedTables = {}
- self._doneWriting(internedTables, shareExtension=True)
- tables = []
- obj_list = []
- done = {}
- objidx = 0
- virtual_edges = []
- self._gatherGraphForHarfbuzz(tables, obj_list, done, objidx, virtual_edges)
- # Gather all data in two passes: the absolute positions of all
- # subtable are needed before the actual data can be assembled.
- pos = 0
- for table in tables:
- table.pos = pos
- pos = pos + table.getDataLength()
-
- data = []
- for table in tables:
- tableData = table.getDataForHarfbuzz()
- data.append(tableData)
-
- if hasattr(hb, "repack_with_tag"):
- return hb.repack_with_tag(str(tableTag), data, obj_list)
- else:
- return hb.repack(data, obj_list)
-
- def getAllData(self, remove_duplicate=True):
- """Assemble all data, including all subtables."""
- if remove_duplicate:
- internedTables = {}
- self._doneWriting(internedTables)
- tables = []
- extTables = []
- done = {}
- self._gatherTables(tables, extTables, done)
- tables.reverse()
- extTables.reverse()
- # Gather all data in two passes: the absolute positions of all
- # subtable are needed before the actual data can be assembled.
- pos = 0
- for table in tables:
- table.pos = pos
- pos = pos + table.getDataLength()
-
- for table in extTables:
- table.pos = pos
- pos = pos + table.getDataLength()
-
- data = []
- for table in tables:
- tableData = table.getData()
- data.append(tableData)
-
- for table in extTables:
- tableData = table.getData()
- data.append(tableData)
-
- return bytesjoin(data)
-
- # interface for gathering data, as used by table.compile()
-
- def getSubWriter(self, offsetSize=2):
- subwriter = self.__class__(self.localState, self.tableTag, offsetSize=offsetSize)
- subwriter.parent = self # because some subtables have idential values, we discard
- # the duplicates under the getAllData method. Hence some
- # subtable writers can have more than one parent writer.
- # But we just care about first one right now.
- return subwriter
-
- def writeValue(self, typecode, value):
- self.items.append(struct.pack(f">{typecode}", value))
- def writeArray(self, typecode, values):
- a = array.array(typecode, values)
- if sys.byteorder != "big": a.byteswap()
- self.items.append(a.tobytes())
-
- def writeInt8(self, value):
- assert -128 <= value < 128, value
- self.items.append(struct.pack(">b", value))
- def writeInt8Array(self, values):
- self.writeArray('b', values)
-
- def writeShort(self, value):
- assert -32768 <= value < 32768, value
- self.items.append(struct.pack(">h", value))
- def writeShortArray(self, values):
- self.writeArray('h', values)
-
- def writeLong(self, value):
- self.items.append(struct.pack(">i", value))
- def writeLongArray(self, values):
- self.writeArray('i', values)
-
- def writeUInt8(self, value):
- assert 0 <= value < 256, value
- self.items.append(struct.pack(">B", value))
- def writeUInt8Array(self, values):
- self.writeArray('B', values)
-
- def writeUShort(self, value):
- assert 0 <= value < 0x10000, value
- self.items.append(struct.pack(">H", value))
- def writeUShortArray(self, values):
- self.writeArray('H', values)
-
- def writeULong(self, value):
- self.items.append(struct.pack(">I", value))
- def writeULongArray(self, values):
- self.writeArray('I', values)
-
- def writeUInt24(self, value):
- assert 0 <= value < 0x1000000, value
- b = struct.pack(">L", value)
- self.items.append(b[1:])
- def writeUInt24Array(self, values):
- for value in values:
- self.writeUInt24(value)
-
- def writeTag(self, tag):
- tag = Tag(tag).tobytes()
- assert len(tag) == 4, tag
- self.items.append(tag)
-
- def writeSubTable(self, subWriter):
- self.items.append(subWriter)
-
- def writeCountReference(self, table, name, size=2, value=None):
- ref = CountReference(table, name, size=size, value=value)
- self.items.append(ref)
- return ref
-
- def writeStruct(self, format, values):
- data = struct.pack(*(format,) + values)
- self.items.append(data)
-
- def writeData(self, data):
- self.items.append(data)
-
- def getOverflowErrorRecord(self, item):
- LookupListIndex = SubTableIndex = itemName = itemIndex = None
- if self.name == 'LookupList':
- LookupListIndex = item.repeatIndex
- elif self.name == 'Lookup':
- LookupListIndex = self.repeatIndex
- SubTableIndex = item.repeatIndex
- else:
- itemName = getattr(item, 'name', '<none>')
- if hasattr(item, 'repeatIndex'):
- itemIndex = item.repeatIndex
- if self.name == 'SubTable':
- LookupListIndex = self.parent.repeatIndex
- SubTableIndex = self.repeatIndex
- elif self.name == 'ExtSubTable':
- LookupListIndex = self.parent.parent.repeatIndex
- SubTableIndex = self.parent.repeatIndex
- else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
- itemName = ".".join([self.name, itemName])
- p1 = self.parent
- while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
- itemName = ".".join([p1.name, itemName])
- p1 = p1.parent
- if p1:
- if p1.name == 'ExtSubTable':
- LookupListIndex = p1.parent.parent.repeatIndex
- SubTableIndex = p1.parent.repeatIndex
- else:
- LookupListIndex = p1.parent.repeatIndex
- SubTableIndex = p1.repeatIndex
-
- return OverflowErrorRecord( (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) )
+ """Helper class to gather and assemble data for OpenType tables."""
+
+ def __init__(self, localState=None, tableTag=None):
+ self.items = []
+ self.pos = None
+ self.localState = localState
+ self.tableTag = tableTag
+ self.parent = None
+
+ def __setitem__(self, name, value):
+ state = self.localState.copy() if self.localState else dict()
+ state[name] = value
+ self.localState = state
+
+ def __getitem__(self, name):
+ return self.localState[name]
+
+ def __delitem__(self, name):
+ del self.localState[name]
+
+ # assembler interface
+
+ def getDataLength(self):
+ """Return the length of this table in bytes, without subtables."""
+ l = 0
+ for item in self.items:
+ if hasattr(item, "getCountData"):
+ l += item.size
+ elif hasattr(item, "subWriter"):
+ l += item.offsetSize
+ else:
+ l = l + len(item)
+ return l
+
+ def getData(self):
+ """Assemble the data for this writer/table, without subtables."""
+ items = list(self.items) # make a shallow copy
+ pos = self.pos
+ numItems = len(items)
+ for i in range(numItems):
+ item = items[i]
+
+ if hasattr(item, "subWriter"):
+ if item.offsetSize == 4:
+ items[i] = packULong(item.subWriter.pos - pos)
+ elif item.offsetSize == 2:
+ try:
+ items[i] = packUShort(item.subWriter.pos - pos)
+ except struct.error:
+ # provide data to fix overflow problem.
+ overflowErrorRecord = self.getOverflowErrorRecord(
+ item.subWriter
+ )
+
+ raise OTLOffsetOverflowError(overflowErrorRecord)
+ elif item.offsetSize == 3:
+ items[i] = packUInt24(item.subWriter.pos - pos)
+ else:
+ raise ValueError(item.offsetSize)
+
+ return bytesjoin(items)
+
+ def getDataForHarfbuzz(self):
+        """Assemble the data for this writer/table with all offset fields set to 0."""
+ items = list(self.items)
+ packFuncs = {2: packUShort, 3: packUInt24, 4: packULong}
+ for i, item in enumerate(items):
+ if hasattr(item, "subWriter"):
+                # Offset value is not needed by the harfbuzz repacker, so set the offset to 0 to avoid overflow here
+ if item.offsetSize in packFuncs:
+ items[i] = packFuncs[item.offsetSize](0)
+ else:
+ raise ValueError(item.offsetSize)
+
+ return bytesjoin(items)
+
+ def __hash__(self):
+ # only works after self._doneWriting() has been called
+ return hash(self.items)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.items == other.items
+
+ def _doneWriting(self, internedTables, shareExtension=False):
+ # Convert CountData references to data string items
+ # collapse duplicate table references to a unique entry
+ # "tables" are OTTableWriter objects.
+
+ # For Extension Lookup types, we can
+ # eliminate duplicates only within the tree under the Extension Lookup,
+ # as offsets may exceed 64K even between Extension LookupTable subtables.
+ isExtension = hasattr(self, "Extension")
+
+ # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level
+ # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly
+ # empty, array. So, we don't share those.
+ # See: https://github.com/fonttools/fonttools/issues/518
+ dontShare = hasattr(self, "DontShare")
+
+ if isExtension and not shareExtension:
+ internedTables = {}
+
+ items = self.items
+ for i in range(len(items)):
+ item = items[i]
+ if hasattr(item, "getCountData"):
+ items[i] = item.getCountData()
+ elif hasattr(item, "subWriter"):
+ item.subWriter._doneWriting(
+ internedTables, shareExtension=shareExtension
+ )
+ # At this point, all subwriters are hashable based on their items.
+ # (See hash and comparison magic methods above.) So the ``setdefault``
+ # call here will return the first writer object we've seen with
+ # equal content, or store it in the dictionary if it's not been
+ # seen yet. We therefore replace the subwriter object with an equivalent
+ # object, which deduplicates the tree.
+ if not dontShare:
+ items[i].subWriter = internedTables.setdefault(
+ item.subWriter, item.subWriter
+ )
+ self.items = tuple(items)
+
+ def _gatherTables(self, tables, extTables, done):
+ # Convert table references in self.items tree to a flat
+ # list of tables in depth-first traversal order.
+ # "tables" are OTTableWriter objects.
+ # We do the traversal in reverse order at each level, in order to
+ # resolve duplicate references to be the last reference in the list of tables.
+ # For extension lookups, duplicate references can be merged only within the
+ # writer tree under the extension lookup.
+
+ done[id(self)] = True
+
+ numItems = len(self.items)
+ iRange = list(range(numItems))
+ iRange.reverse()
+
+ isExtension = hasattr(self, "Extension")
+
+ selfTables = tables
+
+ if isExtension:
+ assert (
+ extTables is not None
+ ), "Program or XML editing error. Extension subtables cannot contain extensions subtables"
+ tables, extTables, done = extTables, None, {}
+
+ # add Coverage table if it is sorted last.
+ sortCoverageLast = False
+ if hasattr(self, "sortCoverageLast"):
+ # Find coverage table
+ for i in range(numItems):
+ item = self.items[i]
+ if (
+ hasattr(item, "subWriter")
+ and getattr(item.subWriter, "name", None) == "Coverage"
+ ):
+ sortCoverageLast = True
+ break
+ if id(item.subWriter) not in done:
+ item.subWriter._gatherTables(tables, extTables, done)
+ else:
+ # We're a new parent of item
+ pass
+
+ for i in iRange:
+ item = self.items[i]
+ if not hasattr(item, "subWriter"):
+ continue
+
+ if (
+ sortCoverageLast
+ and (i == 1)
+ and getattr(item.subWriter, "name", None) == "Coverage"
+ ):
+ # we've already 'gathered' it above
+ continue
+
+ if id(item.subWriter) not in done:
+ item.subWriter._gatherTables(tables, extTables, done)
+ else:
+ # Item is already written out by other parent
+ pass
+
+ selfTables.append(self)
+
+ def _gatherGraphForHarfbuzz(self, tables, obj_list, done, objidx, virtual_edges):
+ real_links = []
+ virtual_links = []
+ item_idx = objidx
+
+ # Merge virtual_links from parent
+ for idx in virtual_edges:
+ virtual_links.append((0, 0, idx))
+
+ sortCoverageLast = False
+ coverage_idx = 0
+ if hasattr(self, "sortCoverageLast"):
+ # Find coverage table
+ for i, item in enumerate(self.items):
+ if getattr(item, "name", None) == "Coverage":
+ sortCoverageLast = True
+ if id(item) not in done:
+ coverage_idx = item_idx = item._gatherGraphForHarfbuzz(
+ tables, obj_list, done, item_idx, virtual_edges
+ )
+ else:
+ coverage_idx = done[id(item)]
+ virtual_edges.append(coverage_idx)
+ break
+
+ child_idx = 0
+ offset_pos = 0
+ for i, item in enumerate(self.items):
+ if hasattr(item, "subWriter"):
+ pos = offset_pos
+ elif hasattr(item, "getCountData"):
+ offset_pos += item.size
+ continue
+ else:
+ offset_pos = offset_pos + len(item)
+ continue
+
+ if id(item.subWriter) not in done:
+ child_idx = item_idx = item.subWriter._gatherGraphForHarfbuzz(
+ tables, obj_list, done, item_idx, virtual_edges
+ )
+ else:
+ child_idx = done[id(item.subWriter)]
+
+ real_edge = (pos, item.offsetSize, child_idx)
+ real_links.append(real_edge)
+ offset_pos += item.offsetSize
+
+ tables.append(self)
+ obj_list.append((real_links, virtual_links))
+ item_idx += 1
+ done[id(self)] = item_idx
+ if sortCoverageLast:
+ virtual_edges.pop()
+
+ return item_idx
+
+ def getAllDataUsingHarfbuzz(self, tableTag):
+ """The Whole table is represented as a Graph.
+ Assemble graph data and call Harfbuzz repacker to pack the table.
+ Harfbuzz repacker is faster and retain as much sub-table sharing as possible, see also:
+ https://github.com/harfbuzz/harfbuzz/blob/main/docs/repacker.md
+ The input format for hb.repack() method is explained here:
+ https://github.com/harfbuzz/uharfbuzz/blob/main/src/uharfbuzz/_harfbuzz.pyx#L1149
+ """
+ internedTables = {}
+ self._doneWriting(internedTables, shareExtension=True)
+ tables = []
+ obj_list = []
+ done = {}
+ objidx = 0
+ virtual_edges = []
+ self._gatherGraphForHarfbuzz(tables, obj_list, done, objidx, virtual_edges)
+ # Gather all data in two passes: the absolute positions of all
+ # subtables are needed before the actual data can be assembled.
+ pos = 0
+ for table in tables:
+ table.pos = pos
+ pos = pos + table.getDataLength()
+
+ data = []
+ for table in tables:
+ tableData = table.getDataForHarfbuzz()
+ data.append(tableData)
+
+ if hasattr(hb, "repack_with_tag"):
+ return hb.repack_with_tag(str(tableTag), data, obj_list)
+ else:
+ return hb.repack(data, obj_list)
+
+ def getAllData(self, remove_duplicate=True):
+ """Assemble all data, including all subtables."""
+ if remove_duplicate:
+ internedTables = {}
+ self._doneWriting(internedTables)
+ tables = []
+ extTables = []
+ done = {}
+ self._gatherTables(tables, extTables, done)
+ tables.reverse()
+ extTables.reverse()
+ # Gather all data in two passes: the absolute positions of all
+ # subtables are needed before the actual data can be assembled.
+ pos = 0
+ for table in tables:
+ table.pos = pos
+ pos = pos + table.getDataLength()
+
+ for table in extTables:
+ table.pos = pos
+ pos = pos + table.getDataLength()
+
+ data = []
+ for table in tables:
+ tableData = table.getData()
+ data.append(tableData)
+
+ for table in extTables:
+ tableData = table.getData()
+ data.append(tableData)
+
+ return bytesjoin(data)
+
+ # interface for gathering data, as used by table.compile()
+
+ def getSubWriter(self):
+ subwriter = self.__class__(self.localState, self.tableTag)
+ subwriter.parent = (
+ self  # because some subtables have identical values, we discard
+ )
+ # the duplicates under the getAllData method. Hence some
+ # subtable writers can have more than one parent writer.
+ # But we only care about the first one right now.
+ return subwriter
+
+ def writeValue(self, typecode, value):
+ self.items.append(struct.pack(f">{typecode}", value))
+
+ def writeArray(self, typecode, values):
+ a = array.array(typecode, values)
+ if sys.byteorder != "big":
+ a.byteswap()
+ self.items.append(a.tobytes())
+
+ def writeInt8(self, value):
+ assert -128 <= value < 128, value
+ self.items.append(struct.pack(">b", value))
+
+ def writeInt8Array(self, values):
+ self.writeArray("b", values)
+
+ def writeShort(self, value):
+ assert -32768 <= value < 32768, value
+ self.items.append(struct.pack(">h", value))
+
+ def writeShortArray(self, values):
+ self.writeArray("h", values)
+
+ def writeLong(self, value):
+ self.items.append(struct.pack(">i", value))
+
+ def writeLongArray(self, values):
+ self.writeArray("i", values)
+
+ def writeUInt8(self, value):
+ assert 0 <= value < 256, value
+ self.items.append(struct.pack(">B", value))
+
+ def writeUInt8Array(self, values):
+ self.writeArray("B", values)
+
+ def writeUShort(self, value):
+ assert 0 <= value < 0x10000, value
+ self.items.append(struct.pack(">H", value))
+
+ def writeUShortArray(self, values):
+ self.writeArray("H", values)
+
+ def writeULong(self, value):
+ self.items.append(struct.pack(">I", value))
+
+ def writeULongArray(self, values):
+ self.writeArray("I", values)
+
+ def writeUInt24(self, value):
+ assert 0 <= value < 0x1000000, value
+ b = struct.pack(">L", value)
+ self.items.append(b[1:])
+
+ def writeUInt24Array(self, values):
+ for value in values:
+ self.writeUInt24(value)
+
+ def writeTag(self, tag):
+ tag = Tag(tag).tobytes()
+ assert len(tag) == 4, tag
+ self.items.append(tag)
+
+ def writeSubTable(self, subWriter, offsetSize):
+ self.items.append(OffsetToWriter(subWriter, offsetSize))
+
+ def writeCountReference(self, table, name, size=2, value=None):
+ ref = CountReference(table, name, size=size, value=value)
+ self.items.append(ref)
+ return ref
+
+ def writeStruct(self, format, values):
+ data = struct.pack(*(format,) + values)
+ self.items.append(data)
+
+ def writeData(self, data):
+ self.items.append(data)
+
+ def getOverflowErrorRecord(self, item):
+ LookupListIndex = SubTableIndex = itemName = itemIndex = None
+ if self.name == "LookupList":
+ LookupListIndex = item.repeatIndex
+ elif self.name == "Lookup":
+ LookupListIndex = self.repeatIndex
+ SubTableIndex = item.repeatIndex
+ else:
+ itemName = getattr(item, "name", "<none>")
+ if hasattr(item, "repeatIndex"):
+ itemIndex = item.repeatIndex
+ if self.name == "SubTable":
+ LookupListIndex = self.parent.repeatIndex
+ SubTableIndex = self.repeatIndex
+ elif self.name == "ExtSubTable":
+ LookupListIndex = self.parent.parent.repeatIndex
+ SubTableIndex = self.parent.repeatIndex
+ else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
+ itemName = ".".join([self.name, itemName])
+ p1 = self.parent
+ while p1 and p1.name not in ["ExtSubTable", "SubTable"]:
+ itemName = ".".join([p1.name, itemName])
+ p1 = p1.parent
+ if p1:
+ if p1.name == "ExtSubTable":
+ LookupListIndex = p1.parent.parent.repeatIndex
+ SubTableIndex = p1.parent.repeatIndex
+ else:
+ LookupListIndex = p1.parent.repeatIndex
+ SubTableIndex = p1.repeatIndex
+
+ return OverflowErrorRecord(
+ (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex)
+ )
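
To make the assembler interface above concrete, here is a minimal sketch (not part of the diff) that builds a parent table with one subtable behind a 16-bit offset and lets getAllData() resolve that offset. The table tag and the values written are arbitrary illustration, not taken from any real OpenType table.

from fontTools.ttLib.tables.otBase import OTTableWriter

writer = OTTableWriter(tableTag="GSUB")  # tag is arbitrary for this sketch
writer.writeUShort(1)                    # e.g. a Format field

sub = writer.getSubWriter()
writer.writeSubTable(sub, offsetSize=2)  # offset placeholder, resolved in getAllData()
sub.writeUShort(0xBEEF)                  # subtable payload

data = writer.getAllData()
# 2 bytes value + 2 bytes offset + 2 bytes subtable payload
assert len(data) == 6
assert data[2:4] == b"\x00\x04"          # offset from parent start to the subtable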
class CountReference(object):
- """A reference to a Count value, not a count of references."""
- def __init__(self, table, name, size=None, value=None):
- self.table = table
- self.name = name
- self.size = size
- if value is not None:
- self.setValue(value)
- def setValue(self, value):
- table = self.table
- name = self.name
- if table[name] is None:
- table[name] = value
- else:
- assert table[name] == value, (name, table[name], value)
- def getValue(self):
- return self.table[self.name]
- def getCountData(self):
- v = self.table[self.name]
- if v is None: v = 0
- return {1:packUInt8, 2:packUShort, 4:packULong}[self.size](v)
-
-
-def packUInt8 (value):
- return struct.pack(">B", value)
+ """A reference to a Count value, not a count of references."""
+
+ def __init__(self, table, name, size=None, value=None):
+ self.table = table
+ self.name = name
+ self.size = size
+ if value is not None:
+ self.setValue(value)
+
+ def setValue(self, value):
+ table = self.table
+ name = self.name
+ if table[name] is None:
+ table[name] = value
+ else:
+ assert table[name] == value, (name, table[name], value)
+
+ def getValue(self):
+ return self.table[self.name]
+
+ def getCountData(self):
+ v = self.table[self.name]
+ if v is None:
+ v = 0
+ return {1: packUInt8, 2: packUShort, 4: packULong}[self.size](v)
+
+
+def packUInt8(value):
+ return struct.pack(">B", value)
+
def packUShort(value):
- return struct.pack(">H", value)
+ return struct.pack(">H", value)
+
def packULong(value):
- assert 0 <= value < 0x100000000, value
- return struct.pack(">I", value)
+ assert 0 <= value < 0x100000000, value
+ return struct.pack(">I", value)
+
def packUInt24(value):
- assert 0 <= value < 0x1000000, value
- return struct.pack(">I", value)[1:]
+ assert 0 <= value < 0x1000000, value
+ return struct.pack(">I", value)[1:]
class BaseTable(object):
- """Generic base class for all OpenType (sub)tables."""
-
- def __getattr__(self, attr):
- reader = self.__dict__.get("reader")
- if reader:
- del self.reader
- font = self.font
- del self.font
- self.decompile(reader, font)
- return getattr(self, attr)
-
- raise AttributeError(attr)
-
- def ensureDecompiled(self, recurse=False):
- reader = self.__dict__.get("reader")
- if reader:
- del self.reader
- font = self.font
- del self.font
- self.decompile(reader, font)
- if recurse:
- for subtable in self.iterSubTables():
- subtable.value.ensureDecompiled(recurse)
-
- @classmethod
- def getRecordSize(cls, reader):
- totalSize = 0
- for conv in cls.converters:
- size = conv.getRecordSize(reader)
- if size is NotImplemented: return NotImplemented
- countValue = 1
- if conv.repeat:
- if conv.repeat in reader:
- countValue = reader[conv.repeat] + conv.aux
- else:
- return NotImplemented
- totalSize += size * countValue
- return totalSize
-
- def getConverters(self):
- return self.converters
-
- def getConverterByName(self, name):
- return self.convertersByName[name]
-
- def populateDefaults(self, propagator=None):
- for conv in self.getConverters():
- if conv.repeat:
- if not hasattr(self, conv.name):
- setattr(self, conv.name, [])
- countValue = len(getattr(self, conv.name)) - conv.aux
- try:
- count_conv = self.getConverterByName(conv.repeat)
- setattr(self, conv.repeat, countValue)
- except KeyError:
- # conv.repeat is a propagated count
- if propagator and conv.repeat in propagator:
- propagator[conv.repeat].setValue(countValue)
- else:
- if conv.aux and not eval(conv.aux, None, self.__dict__):
- continue
- if hasattr(self, conv.name):
- continue # Warn if it should NOT be present?!
- if hasattr(conv, 'writeNullOffset'):
- setattr(self, conv.name, None) # Warn?
- #elif not conv.isCount:
- # # Warn?
- # pass
- if hasattr(conv, "DEFAULT"):
- # OptionalValue converters (e.g. VarIndex)
- setattr(self, conv.name, conv.DEFAULT)
-
- def decompile(self, reader, font):
- self.readFormat(reader)
- table = {}
- self.__rawTable = table # for debugging
- for conv in self.getConverters():
- if conv.name == "SubTable":
- conv = conv.getConverter(reader.tableTag,
- table["LookupType"])
- if conv.name == "ExtSubTable":
- conv = conv.getConverter(reader.tableTag,
- table["ExtensionLookupType"])
- if conv.name == "FeatureParams":
- conv = conv.getConverter(reader["FeatureTag"])
- if conv.name == "SubStruct":
- conv = conv.getConverter(reader.tableTag,
- table["MorphType"])
- try:
- if conv.repeat:
- if isinstance(conv.repeat, int):
- countValue = conv.repeat
- elif conv.repeat in table:
- countValue = table[conv.repeat]
- else:
- # conv.repeat is a propagated count
- countValue = reader[conv.repeat]
- countValue += conv.aux
- table[conv.name] = conv.readArray(reader, font, table, countValue)
- else:
- if conv.aux and not eval(conv.aux, None, table):
- continue
- table[conv.name] = conv.read(reader, font, table)
- if conv.isPropagated:
- reader[conv.name] = table[conv.name]
- except Exception as e:
- name = conv.name
- e.args = e.args + (name,)
- raise
-
- if hasattr(self, 'postRead'):
- self.postRead(table, font)
- else:
- self.__dict__.update(table)
-
- del self.__rawTable # succeeded, get rid of debugging info
-
- def compile(self, writer, font):
- self.ensureDecompiled()
- # TODO Following hack to be removed by rewriting how FormatSwitching tables
- # are handled.
- # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
- if hasattr(self, 'preWrite'):
- deleteFormat = not hasattr(self, 'Format')
- table = self.preWrite(font)
- deleteFormat = deleteFormat and hasattr(self, 'Format')
- else:
- deleteFormat = False
- table = self.__dict__.copy()
-
- # some count references may have been initialized in a custom preWrite; we set
- # these in the writer's state beforehand (instead of sequentially) so they will
- # be propagated to all nested subtables even if the count appears in the current
- # table only *after* the offset to the subtable that it is counting.
- for conv in self.getConverters():
- if conv.isCount and conv.isPropagated:
- value = table.get(conv.name)
- if isinstance(value, CountReference):
- writer[conv.name] = value
-
- if hasattr(self, 'sortCoverageLast'):
- writer.sortCoverageLast = 1
-
- if hasattr(self, 'DontShare'):
- writer.DontShare = True
-
- if hasattr(self.__class__, 'LookupType'):
- writer['LookupType'].setValue(self.__class__.LookupType)
-
- self.writeFormat(writer)
- for conv in self.getConverters():
- value = table.get(conv.name) # TODO Handle defaults instead of defaulting to None!
- if conv.repeat:
- if value is None:
- value = []
- countValue = len(value) - conv.aux
- if isinstance(conv.repeat, int):
- assert len(value) == conv.repeat, 'expected %d values, got %d' % (conv.repeat, len(value))
- elif conv.repeat in table:
- CountReference(table, conv.repeat, value=countValue)
- else:
- # conv.repeat is a propagated count
- writer[conv.repeat].setValue(countValue)
- try:
- conv.writeArray(writer, font, table, value)
- except Exception as e:
- e.args = e.args + (conv.name+'[]',)
- raise
- elif conv.isCount:
- # Special-case Count values.
- # Assumption: a Count field will *always* precede
- # the actual array(s).
- # We need a default value, as it may be set later by a nested
- # table. We will later store it here.
- # We add a reference: by the time the data is assembled
- # the Count value will be filled in.
- # We ignore the current count value since it will be recomputed,
- # unless it's a CountReference that was already initialized in a custom preWrite.
- if isinstance(value, CountReference):
- ref = value
- ref.size = conv.staticSize
- writer.writeData(ref)
- table[conv.name] = ref.getValue()
- else:
- ref = writer.writeCountReference(table, conv.name, conv.staticSize)
- table[conv.name] = None
- if conv.isPropagated:
- writer[conv.name] = ref
- elif conv.isLookupType:
- # We make sure that subtables have the same lookup type,
- # and that the type is the same as the one set on the
- # Lookup object, if any is set.
- if conv.name not in table:
- table[conv.name] = None
- ref = writer.writeCountReference(table, conv.name, conv.staticSize, table[conv.name])
- writer['LookupType'] = ref
- else:
- if conv.aux and not eval(conv.aux, None, table):
- continue
- try:
- conv.write(writer, font, table, value)
- except Exception as e:
- name = value.__class__.__name__ if value is not None else conv.name
- e.args = e.args + (name,)
- raise
- if conv.isPropagated:
- writer[conv.name] = value
-
- if deleteFormat:
- del self.Format
-
- def readFormat(self, reader):
- pass
-
- def writeFormat(self, writer):
- pass
-
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- tableName = name if name else self.__class__.__name__
- if attrs is None:
- attrs = []
- if hasattr(self, "Format"):
- attrs = attrs + [("Format", self.Format)]
- xmlWriter.begintag(tableName, attrs)
- xmlWriter.newline()
- self.toXML2(xmlWriter, font)
- xmlWriter.endtag(tableName)
- xmlWriter.newline()
-
- def toXML2(self, xmlWriter, font):
- # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
- # This is because in TTX our parent writes our main tag, and in otBase.py we
- # do it ourselves. I think I'm getting schizophrenic...
- for conv in self.getConverters():
- if conv.repeat:
- value = getattr(self, conv.name, [])
- for i in range(len(value)):
- item = value[i]
- conv.xmlWrite(xmlWriter, font, item, conv.name,
- [("index", i)])
- else:
- if conv.aux and not eval(conv.aux, None, vars(self)):
- continue
- value = getattr(self, conv.name, None) # TODO Handle defaults instead of defaulting to None!
- conv.xmlWrite(xmlWriter, font, value, conv.name, [])
-
- def fromXML(self, name, attrs, content, font):
- try:
- conv = self.getConverterByName(name)
- except KeyError:
- raise # XXX on KeyError, raise nice error
- value = conv.xmlRead(attrs, content, font)
- if conv.repeat:
- seq = getattr(self, conv.name, None)
- if seq is None:
- seq = []
- setattr(self, conv.name, seq)
- seq.append(value)
- else:
- setattr(self, conv.name, value)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
-
- self.ensureDecompiled()
- other.ensureDecompiled()
-
- return self.__dict__ == other.__dict__
-
- class SubTableEntry(NamedTuple):
- """See BaseTable.iterSubTables()"""
- name: str
- value: "BaseTable"
- index: Optional[int] = None # index into given array, None for single values
-
- def iterSubTables(self) -> Iterator[SubTableEntry]:
- """Yield (name, value, index) namedtuples for all subtables of current table.
-
- A sub-table is an instance of BaseTable (or subclass thereof) that is a child
- of self, the current parent table.
- The tuples also contain the attribute name (str) of the parent table to get
- a subtable, and optionally, for lists of subtables (i.e. attributes associated
- with a converter that has a 'repeat'), an index into the list containing the
- given subtable value.
- This method can be useful to traverse trees of otTables.
- """
- for conv in self.getConverters():
- name = conv.name
- value = getattr(self, name, None)
- if value is None:
- continue
- if isinstance(value, BaseTable):
- yield self.SubTableEntry(name, value)
- elif isinstance(value, list):
- yield from (
- self.SubTableEntry(name, v, index=i)
- for i, v in enumerate(value)
- if isinstance(v, BaseTable)
- )
-
- # instance (not @class)method for consistency with FormatSwitchingBaseTable
- def getVariableAttrs(self):
- return getVariableAttrs(self.__class__)
+ """Generic base class for all OpenType (sub)tables."""
+
+ def __getattr__(self, attr):
+ reader = self.__dict__.get("reader")
+ if reader:
+ del self.reader
+ font = self.font
+ del self.font
+ self.decompile(reader, font)
+ return getattr(self, attr)
+
+ raise AttributeError(attr)
+
+ def ensureDecompiled(self, recurse=False):
+ reader = self.__dict__.get("reader")
+ if reader:
+ del self.reader
+ font = self.font
+ del self.font
+ self.decompile(reader, font)
+ if recurse:
+ for subtable in self.iterSubTables():
+ subtable.value.ensureDecompiled(recurse)
+
+ def __getstate__(self):
+ # before copying/pickling 'lazy' objects, make a shallow copy of OTTableReader
+ # https://github.com/fonttools/fonttools/issues/2965
+ if "reader" in self.__dict__:
+ state = self.__dict__.copy()
+ state["reader"] = self.__dict__["reader"].copy()
+ return state
+ return self.__dict__
+
+ @classmethod
+ def getRecordSize(cls, reader):
+ totalSize = 0
+ for conv in cls.converters:
+ size = conv.getRecordSize(reader)
+ if size is NotImplemented:
+ return NotImplemented
+ countValue = 1
+ if conv.repeat:
+ if conv.repeat in reader:
+ countValue = reader[conv.repeat] + conv.aux
+ else:
+ return NotImplemented
+ totalSize += size * countValue
+ return totalSize
+
+ def getConverters(self):
+ return self.converters
+
+ def getConverterByName(self, name):
+ return self.convertersByName[name]
+
+ def populateDefaults(self, propagator=None):
+ for conv in self.getConverters():
+ if conv.repeat:
+ if not hasattr(self, conv.name):
+ setattr(self, conv.name, [])
+ countValue = len(getattr(self, conv.name)) - conv.aux
+ try:
+ count_conv = self.getConverterByName(conv.repeat)
+ setattr(self, conv.repeat, countValue)
+ except KeyError:
+ # conv.repeat is a propagated count
+ if propagator and conv.repeat in propagator:
+ propagator[conv.repeat].setValue(countValue)
+ else:
+ if conv.aux and not eval(conv.aux, None, self.__dict__):
+ continue
+ if hasattr(self, conv.name):
+ continue # Warn if it should NOT be present?!
+ if hasattr(conv, "writeNullOffset"):
+ setattr(self, conv.name, None) # Warn?
+ # elif not conv.isCount:
+ # # Warn?
+ # pass
+ if hasattr(conv, "DEFAULT"):
+ # OptionalValue converters (e.g. VarIndex)
+ setattr(self, conv.name, conv.DEFAULT)
+
+ def decompile(self, reader, font):
+ self.readFormat(reader)
+ table = {}
+ self.__rawTable = table # for debugging
+ for conv in self.getConverters():
+ if conv.name == "SubTable":
+ conv = conv.getConverter(reader.tableTag, table["LookupType"])
+ if conv.name == "ExtSubTable":
+ conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"])
+ if conv.name == "FeatureParams":
+ conv = conv.getConverter(reader["FeatureTag"])
+ if conv.name == "SubStruct":
+ conv = conv.getConverter(reader.tableTag, table["MorphType"])
+ try:
+ if conv.repeat:
+ if isinstance(conv.repeat, int):
+ countValue = conv.repeat
+ elif conv.repeat in table:
+ countValue = table[conv.repeat]
+ else:
+ # conv.repeat is a propagated count
+ countValue = reader[conv.repeat]
+ countValue += conv.aux
+ table[conv.name] = conv.readArray(reader, font, table, countValue)
+ else:
+ if conv.aux and not eval(conv.aux, None, table):
+ continue
+ table[conv.name] = conv.read(reader, font, table)
+ if conv.isPropagated:
+ reader[conv.name] = table[conv.name]
+ except Exception as e:
+ name = conv.name
+ e.args = e.args + (name,)
+ raise
+
+ if hasattr(self, "postRead"):
+ self.postRead(table, font)
+ else:
+ self.__dict__.update(table)
+
+ del self.__rawTable # succeeded, get rid of debugging info
+
+ def compile(self, writer, font):
+ self.ensureDecompiled()
+ # TODO Following hack to be removed by rewriting how FormatSwitching tables
+ # are handled.
+ # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
+ if hasattr(self, "preWrite"):
+ deleteFormat = not hasattr(self, "Format")
+ table = self.preWrite(font)
+ deleteFormat = deleteFormat and hasattr(self, "Format")
+ else:
+ deleteFormat = False
+ table = self.__dict__.copy()
+
+ # some count references may have been initialized in a custom preWrite; we set
+ # these in the writer's state beforehand (instead of sequentially) so they will
+ # be propagated to all nested subtables even if the count appears in the current
+ # table only *after* the offset to the subtable that it is counting.
+ for conv in self.getConverters():
+ if conv.isCount and conv.isPropagated:
+ value = table.get(conv.name)
+ if isinstance(value, CountReference):
+ writer[conv.name] = value
+
+ if hasattr(self, "sortCoverageLast"):
+ writer.sortCoverageLast = 1
+
+ if hasattr(self, "DontShare"):
+ writer.DontShare = True
+
+ if hasattr(self.__class__, "LookupType"):
+ writer["LookupType"].setValue(self.__class__.LookupType)
+
+ self.writeFormat(writer)
+ for conv in self.getConverters():
+ value = table.get(
+ conv.name
+ ) # TODO Handle defaults instead of defaulting to None!
+ if conv.repeat:
+ if value is None:
+ value = []
+ countValue = len(value) - conv.aux
+ if isinstance(conv.repeat, int):
+ assert len(value) == conv.repeat, "expected %d values, got %d" % (
+ conv.repeat,
+ len(value),
+ )
+ elif conv.repeat in table:
+ CountReference(table, conv.repeat, value=countValue)
+ else:
+ # conv.repeat is a propagated count
+ writer[conv.repeat].setValue(countValue)
+ try:
+ conv.writeArray(writer, font, table, value)
+ except Exception as e:
+ e.args = e.args + (conv.name + "[]",)
+ raise
+ elif conv.isCount:
+ # Special-case Count values.
+ # Assumption: a Count field will *always* precede
+ # the actual array(s).
+ # We need a default value, as it may be set later by a nested
+ # table. We will later store it here.
+ # We add a reference: by the time the data is assembled
+ # the Count value will be filled in.
+ # We ignore the current count value since it will be recomputed,
+ # unless it's a CountReference that was already initialized in a custom preWrite.
+ if isinstance(value, CountReference):
+ ref = value
+ ref.size = conv.staticSize
+ writer.writeData(ref)
+ table[conv.name] = ref.getValue()
+ else:
+ ref = writer.writeCountReference(table, conv.name, conv.staticSize)
+ table[conv.name] = None
+ if conv.isPropagated:
+ writer[conv.name] = ref
+ elif conv.isLookupType:
+ # We make sure that subtables have the same lookup type,
+ # and that the type is the same as the one set on the
+ # Lookup object, if any is set.
+ if conv.name not in table:
+ table[conv.name] = None
+ ref = writer.writeCountReference(
+ table, conv.name, conv.staticSize, table[conv.name]
+ )
+ writer["LookupType"] = ref
+ else:
+ if conv.aux and not eval(conv.aux, None, table):
+ continue
+ try:
+ conv.write(writer, font, table, value)
+ except Exception as e:
+ name = value.__class__.__name__ if value is not None else conv.name
+ e.args = e.args + (name,)
+ raise
+ if conv.isPropagated:
+ writer[conv.name] = value
+
+ if deleteFormat:
+ del self.Format
+
+ def readFormat(self, reader):
+ pass
+
+ def writeFormat(self, writer):
+ pass
+
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ tableName = name if name else self.__class__.__name__
+ if attrs is None:
+ attrs = []
+ if hasattr(self, "Format"):
+ attrs = attrs + [("Format", self.Format)]
+ xmlWriter.begintag(tableName, attrs)
+ xmlWriter.newline()
+ self.toXML2(xmlWriter, font)
+ xmlWriter.endtag(tableName)
+ xmlWriter.newline()
+
+ def toXML2(self, xmlWriter, font):
+ # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
+ # This is because in TTX our parent writes our main tag, and in otBase.py we
+ # do it ourselves. I think I'm getting schizophrenic...
+ for conv in self.getConverters():
+ if conv.repeat:
+ value = getattr(self, conv.name, [])
+ for i in range(len(value)):
+ item = value[i]
+ conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)])
+ else:
+ if conv.aux and not eval(conv.aux, None, vars(self)):
+ continue
+ value = getattr(
+ self, conv.name, None
+ ) # TODO Handle defaults instead of defaulting to None!
+ conv.xmlWrite(xmlWriter, font, value, conv.name, [])
+
+ def fromXML(self, name, attrs, content, font):
+ try:
+ conv = self.getConverterByName(name)
+ except KeyError:
+ raise # XXX on KeyError, raise nice error
+ value = conv.xmlRead(attrs, content, font)
+ if conv.repeat:
+ seq = getattr(self, conv.name, None)
+ if seq is None:
+ seq = []
+ setattr(self, conv.name, seq)
+ seq.append(value)
+ else:
+ setattr(self, conv.name, value)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+
+ self.ensureDecompiled()
+ other.ensureDecompiled()
+
+ return self.__dict__ == other.__dict__
+
+ class SubTableEntry(NamedTuple):
+ """See BaseTable.iterSubTables()"""
+
+ name: str
+ value: "BaseTable"
+ index: Optional[int] = None # index into given array, None for single values
+
+ def iterSubTables(self) -> Iterator[SubTableEntry]:
+ """Yield (name, value, index) namedtuples for all subtables of current table.
+
+ A sub-table is an instance of BaseTable (or subclass thereof) that is a child
+ of self, the current parent table.
+ The tuples also contain the attribute name (str) of the parent table to get
+ a subtable, and optionally, for lists of subtables (i.e. attributes associated
+ with a converter that has a 'repeat'), an index into the list containing the
+ given subtable value.
+ This method can be useful to traverse trees of otTables.
+ """
+ for conv in self.getConverters():
+ name = conv.name
+ value = getattr(self, name, None)
+ if value is None:
+ continue
+ if isinstance(value, BaseTable):
+ yield self.SubTableEntry(name, value)
+ elif isinstance(value, list):
+ yield from (
+ self.SubTableEntry(name, v, index=i)
+ for i, v in enumerate(value)
+ if isinstance(v, BaseTable)
+ )
+
+ # instance (not @class)method for consistency with FormatSwitchingBaseTable
+ def getVariableAttrs(self):
+ return getVariableAttrs(self.__class__)
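
As a usage sketch for iterSubTables() above: the snippet below walks a font's GSUB tree recursively and prints the attribute path of every subtable. "MyFont.ttf" is a placeholder path, not something from this diff.

from fontTools.ttLib import TTFont

def walk(table, depth=0):
    # Recursively visit every BaseTable reachable from `table`.
    for entry in table.iterSubTables():
        suffix = "" if entry.index is None else f"[{entry.index}]"
        print("  " * depth + entry.name + suffix)
        walk(entry.value, depth + 1)

font = TTFont("MyFont.ttf")      # placeholder font path
walk(font["GSUB"].table)         # .table is the top-level otTables.GSUB object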
class FormatSwitchingBaseTable(BaseTable):
- """Minor specialization of BaseTable, for tables that have multiple
- formats, eg. CoverageFormat1 vs. CoverageFormat2."""
+ """Minor specialization of BaseTable, for tables that have multiple
+ formats, eg. CoverageFormat1 vs. CoverageFormat2."""
- @classmethod
- def getRecordSize(cls, reader):
- return NotImplemented
+ @classmethod
+ def getRecordSize(cls, reader):
+ return NotImplemented
- def getConverters(self):
- try:
- fmt = self.Format
- except AttributeError:
- # some FormatSwitchingBaseTables (e.g. Coverage) no longer have 'Format'
- # attribute after fully decompiled, only gain one in preWrite before being
- # recompiled. In the decompiled state, these hand-coded classes defined in
- # otTables.py lose their format-specific nature and gain more high-level
- # attributes that are not tied to converters.
- return []
- return self.converters.get(self.Format, [])
+ def getConverters(self):
+ try:
+ fmt = self.Format
+ except AttributeError:
+ # some FormatSwitchingBaseTables (e.g. Coverage) no longer have 'Format'
+ # attribute after fully decompiled, only gain one in preWrite before being
+ # recompiled. In the decompiled state, these hand-coded classes defined in
+ # otTables.py lose their format-specific nature and gain more high-level
+ # attributes that are not tied to converters.
+ return []
+ return self.converters.get(self.Format, [])
- def getConverterByName(self, name):
- return self.convertersByName[self.Format][name]
+ def getConverterByName(self, name):
+ return self.convertersByName[self.Format][name]
- def readFormat(self, reader):
- self.Format = reader.readUShort()
+ def readFormat(self, reader):
+ self.Format = reader.readUShort()
- def writeFormat(self, writer):
- writer.writeUShort(self.Format)
+ def writeFormat(self, writer):
+ writer.writeUShort(self.Format)
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- BaseTable.toXML(self, xmlWriter, font, attrs, name)
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ BaseTable.toXML(self, xmlWriter, font, attrs, name)
- def getVariableAttrs(self):
- return getVariableAttrs(self.__class__, self.Format)
+ def getVariableAttrs(self):
+ return getVariableAttrs(self.__class__, self.Format)
class UInt8FormatSwitchingBaseTable(FormatSwitchingBaseTable):
- def readFormat(self, reader):
- self.Format = reader.readUInt8()
+ def readFormat(self, reader):
+ self.Format = reader.readUInt8()
- def writeFormat(self, writer):
- writer.writeUInt8(self.Format)
+ def writeFormat(self, writer):
+ writer.writeUInt8(self.Format)
formatSwitchingBaseTables = {
- "uint16": FormatSwitchingBaseTable,
- "uint8": UInt8FormatSwitchingBaseTable,
+ "uint16": FormatSwitchingBaseTable,
+ "uint8": UInt8FormatSwitchingBaseTable,
}
+
def getFormatSwitchingBaseTableClass(formatType):
- try:
- return formatSwitchingBaseTables[formatType]
- except KeyError:
- raise TypeError(f"Unsupported format type: {formatType!r}")
+ try:
+ return formatSwitchingBaseTables[formatType]
+ except KeyError:
+ raise TypeError(f"Unsupported format type: {formatType!r}")
# memoize since these are parsed from otData.py, thus stay constant
@lru_cache()
def getVariableAttrs(cls: BaseTable, fmt: Optional[int] = None) -> Tuple[str]:
- """Return sequence of variable table field names (can be empty).
-
- Attributes are deemed "variable" when their otData.py's description contain
- 'VarIndexBase + {offset}', e.g. COLRv1 PaintVar* tables.
- """
- if not issubclass(cls, BaseTable):
- raise TypeError(cls)
- if issubclass(cls, FormatSwitchingBaseTable):
- if fmt is None:
- raise TypeError(f"'fmt' is required for format-switching {cls.__name__}")
- converters = cls.convertersByName[fmt]
- else:
- converters = cls.convertersByName
- # assume if no 'VarIndexBase' field is present, table has no variable fields
- if "VarIndexBase" not in converters:
- return ()
- varAttrs = {}
- for name, conv in converters.items():
- offset = conv.getVarIndexOffset()
- if offset is not None:
- varAttrs[name] = offset
- return tuple(sorted(varAttrs, key=varAttrs.__getitem__))
+ """Return sequence of variable table field names (can be empty).
+
+ Attributes are deemed "variable" when their otData.py description contains
+ 'VarIndexBase + {offset}', e.g. COLRv1 PaintVar* tables.
+ """
+ if not issubclass(cls, BaseTable):
+ raise TypeError(cls)
+ if issubclass(cls, FormatSwitchingBaseTable):
+ if fmt is None:
+ raise TypeError(f"'fmt' is required for format-switching {cls.__name__}")
+ converters = cls.convertersByName[fmt]
+ else:
+ converters = cls.convertersByName
+ # assume if no 'VarIndexBase' field is present, table has no variable fields
+ if "VarIndexBase" not in converters:
+ return ()
+ varAttrs = {}
+ for name, conv in converters.items():
+ offset = conv.getVarIndexOffset()
+ if offset is not None:
+ varAttrs[name] = offset
+ return tuple(sorted(varAttrs, key=varAttrs.__getitem__))
#
@@ -1206,163 +1303,166 @@ def getVariableAttrs(cls: BaseTable, fmt: Optional[int] = None) -> Tuple[str]:
#
valueRecordFormat = [
-# Mask Name isDevice signed
- (0x0001, "XPlacement", 0, 1),
- (0x0002, "YPlacement", 0, 1),
- (0x0004, "XAdvance", 0, 1),
- (0x0008, "YAdvance", 0, 1),
- (0x0010, "XPlaDevice", 1, 0),
- (0x0020, "YPlaDevice", 1, 0),
- (0x0040, "XAdvDevice", 1, 0),
- (0x0080, "YAdvDevice", 1, 0),
-# reserved:
- (0x0100, "Reserved1", 0, 0),
- (0x0200, "Reserved2", 0, 0),
- (0x0400, "Reserved3", 0, 0),
- (0x0800, "Reserved4", 0, 0),
- (0x1000, "Reserved5", 0, 0),
- (0x2000, "Reserved6", 0, 0),
- (0x4000, "Reserved7", 0, 0),
- (0x8000, "Reserved8", 0, 0),
+ # Mask Name isDevice signed
+ (0x0001, "XPlacement", 0, 1),
+ (0x0002, "YPlacement", 0, 1),
+ (0x0004, "XAdvance", 0, 1),
+ (0x0008, "YAdvance", 0, 1),
+ (0x0010, "XPlaDevice", 1, 0),
+ (0x0020, "YPlaDevice", 1, 0),
+ (0x0040, "XAdvDevice", 1, 0),
+ (0x0080, "YAdvDevice", 1, 0),
+ # reserved:
+ (0x0100, "Reserved1", 0, 0),
+ (0x0200, "Reserved2", 0, 0),
+ (0x0400, "Reserved3", 0, 0),
+ (0x0800, "Reserved4", 0, 0),
+ (0x1000, "Reserved5", 0, 0),
+ (0x2000, "Reserved6", 0, 0),
+ (0x4000, "Reserved7", 0, 0),
+ (0x8000, "Reserved8", 0, 0),
]
+
def _buildDict():
- d = {}
- for mask, name, isDevice, signed in valueRecordFormat:
- d[name] = mask, isDevice, signed
- return d
+ d = {}
+ for mask, name, isDevice, signed in valueRecordFormat:
+ d[name] = mask, isDevice, signed
+ return d
+
valueRecordFormatDict = _buildDict()
class ValueRecordFactory(object):
- """Given a format code, this object convert ValueRecords."""
-
- def __init__(self, valueFormat):
- format = []
- for mask, name, isDevice, signed in valueRecordFormat:
- if valueFormat & mask:
- format.append((name, isDevice, signed))
- self.format = format
-
- def __len__(self):
- return len(self.format)
-
- def readValueRecord(self, reader, font):
- format = self.format
- if not format:
- return None
- valueRecord = ValueRecord()
- for name, isDevice, signed in format:
- if signed:
- value = reader.readShort()
- else:
- value = reader.readUShort()
- if isDevice:
- if value:
- from . import otTables
- subReader = reader.getSubReader(value)
- value = getattr(otTables, name)()
- value.decompile(subReader, font)
- else:
- value = None
- setattr(valueRecord, name, value)
- return valueRecord
-
- def writeValueRecord(self, writer, font, valueRecord):
- for name, isDevice, signed in self.format:
- value = getattr(valueRecord, name, 0)
- if isDevice:
- if value:
- subWriter = writer.getSubWriter()
- writer.writeSubTable(subWriter)
- value.compile(subWriter, font)
- else:
- writer.writeUShort(0)
- elif signed:
- writer.writeShort(value)
- else:
- writer.writeUShort(value)
+ """Given a format code, this object convert ValueRecords."""
+
+ def __init__(self, valueFormat):
+ format = []
+ for mask, name, isDevice, signed in valueRecordFormat:
+ if valueFormat & mask:
+ format.append((name, isDevice, signed))
+ self.format = format
+
+ def __len__(self):
+ return len(self.format)
+
+ def readValueRecord(self, reader, font):
+ format = self.format
+ if not format:
+ return None
+ valueRecord = ValueRecord()
+ for name, isDevice, signed in format:
+ if signed:
+ value = reader.readShort()
+ else:
+ value = reader.readUShort()
+ if isDevice:
+ if value:
+ from . import otTables
+
+ subReader = reader.getSubReader(value)
+ value = getattr(otTables, name)()
+ value.decompile(subReader, font)
+ else:
+ value = None
+ setattr(valueRecord, name, value)
+ return valueRecord
+
+ def writeValueRecord(self, writer, font, valueRecord):
+ for name, isDevice, signed in self.format:
+ value = getattr(valueRecord, name, 0)
+ if isDevice:
+ if value:
+ subWriter = writer.getSubWriter()
+ writer.writeSubTable(subWriter, offsetSize=2)
+ value.compile(subWriter, font)
+ else:
+ writer.writeUShort(0)
+ elif signed:
+ writer.writeShort(value)
+ else:
+ writer.writeUShort(value)
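
A small illustration of ValueRecordFactory: a GPOS value format of 0x0005 selects the XPlacement (0x0001) and XAdvance (0x0004) entries from valueRecordFormat above.

from fontTools.ttLib.tables.otBase import ValueRecordFactory

factory = ValueRecordFactory(0x0005)
assert len(factory) == 2
assert [name for name, isDevice, signed in factory.format] == ["XPlacement", "XAdvance"]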
class ValueRecord(object):
-
- # see ValueRecordFactory
-
- def __init__(self, valueFormat=None, src=None):
- if valueFormat is not None:
- for mask, name, isDevice, signed in valueRecordFormat:
- if valueFormat & mask:
- setattr(self, name, None if isDevice else 0)
- if src is not None:
- for key,val in src.__dict__.items():
- if not hasattr(self, key):
- continue
- setattr(self, key, val)
- elif src is not None:
- self.__dict__ = src.__dict__.copy()
-
- def getFormat(self):
- format = 0
- for name in self.__dict__.keys():
- format = format | valueRecordFormatDict[name][0]
- return format
-
- def getEffectiveFormat(self):
- format = 0
- for name,value in self.__dict__.items():
- if value:
- format = format | valueRecordFormatDict[name][0]
- return format
-
- def toXML(self, xmlWriter, font, valueName, attrs=None):
- if attrs is None:
- simpleItems = []
- else:
- simpleItems = list(attrs)
- for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values
- if hasattr(self, name):
- simpleItems.append((name, getattr(self, name)))
- deviceItems = []
- for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records
- if hasattr(self, name):
- device = getattr(self, name)
- if device is not None:
- deviceItems.append((name, device))
- if deviceItems:
- xmlWriter.begintag(valueName, simpleItems)
- xmlWriter.newline()
- for name, deviceRecord in deviceItems:
- if deviceRecord is not None:
- deviceRecord.toXML(xmlWriter, font, name=name)
- xmlWriter.endtag(valueName)
- xmlWriter.newline()
- else:
- xmlWriter.simpletag(valueName, simpleItems)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- from . import otTables
- for k, v in attrs.items():
- setattr(self, k, int(v))
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- value = getattr(otTables, name)()
- for elem2 in content:
- if not isinstance(elem2, tuple):
- continue
- name2, attrs2, content2 = elem2
- value.fromXML(name2, attrs2, content2, font)
- setattr(self, name, value)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
+ # see ValueRecordFactory
+
+ def __init__(self, valueFormat=None, src=None):
+ if valueFormat is not None:
+ for mask, name, isDevice, signed in valueRecordFormat:
+ if valueFormat & mask:
+ setattr(self, name, None if isDevice else 0)
+ if src is not None:
+ for key, val in src.__dict__.items():
+ if not hasattr(self, key):
+ continue
+ setattr(self, key, val)
+ elif src is not None:
+ self.__dict__ = src.__dict__.copy()
+
+ def getFormat(self):
+ format = 0
+ for name in self.__dict__.keys():
+ format = format | valueRecordFormatDict[name][0]
+ return format
+
+ def getEffectiveFormat(self):
+ format = 0
+ for name, value in self.__dict__.items():
+ if value:
+ format = format | valueRecordFormatDict[name][0]
+ return format
+
+ def toXML(self, xmlWriter, font, valueName, attrs=None):
+ if attrs is None:
+ simpleItems = []
+ else:
+ simpleItems = list(attrs)
+ for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values
+ if hasattr(self, name):
+ simpleItems.append((name, getattr(self, name)))
+ deviceItems = []
+ for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records
+ if hasattr(self, name):
+ device = getattr(self, name)
+ if device is not None:
+ deviceItems.append((name, device))
+ if deviceItems:
+ xmlWriter.begintag(valueName, simpleItems)
+ xmlWriter.newline()
+ for name, deviceRecord in deviceItems:
+ if deviceRecord is not None:
+ deviceRecord.toXML(xmlWriter, font, name=name)
+ xmlWriter.endtag(valueName)
+ xmlWriter.newline()
+ else:
+ xmlWriter.simpletag(valueName, simpleItems)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ from . import otTables
+
+ for k, v in attrs.items():
+ setattr(self, k, int(v))
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ value = getattr(otTables, name)()
+ for elem2 in content:
+ if not isinstance(elem2, tuple):
+ continue
+ name2, attrs2, content2 = elem2
+ value.fromXML(name2, attrs2, content2, font)
+ setattr(self, name, value)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
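
And, for ValueRecord itself, a brief sketch with made-up numbers: initialising from a value format creates one attribute per selected field, getFormat() recovers the mask from those attributes, and getEffectiveFormat() keeps only the non-zero ones.

from fontTools.ttLib.tables.otBase import ValueRecord

vr = ValueRecord(valueFormat=0x0001 | 0x0004)  # XPlacement | XAdvance
vr.XAdvance = -50
assert vr.XPlacement == 0
assert vr.getFormat() == 0x0005
assert vr.getEffectiveFormat() == 0x0004       # only non-zero fields remain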
diff --git a/Lib/fontTools/ttLib/tables/otConverters.py b/Lib/fontTools/ttLib/tables/otConverters.py
index b08f1f19..390f1660 100644
--- a/Lib/fontTools/ttLib/tables/otConverters.py
+++ b/Lib/fontTools/ttLib/tables/otConverters.py
@@ -1,22 +1,34 @@
from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi,
- floatToFixedToStr as fl2str,
- strToFixedToFloat as str2fl,
- ensureVersionIsLong as fi2ve,
- versionToFixed as ve2fi,
+ fixedToFloat as fi2fl,
+ floatToFixed as fl2fi,
+ floatToFixedToStr as fl2str,
+ strToFixedToFloat as str2fl,
+ ensureVersionIsLong as fi2ve,
+ versionToFixed as ve2fi,
)
from fontTools.misc.roundTools import nearestMultipleShortestRepr, otRound
from fontTools.misc.textTools import bytesjoin, tobytes, tostr, pad, safeEval
from fontTools.ttLib import getSearchRange
-from .otBase import (CountReference, FormatSwitchingBaseTable,
- OTTableReader, OTTableWriter, ValueRecordFactory)
-from .otTables import (lookupTypes, AATStateTable, AATState, AATAction,
- ContextualMorphAction, LigatureMorphAction,
- InsertionMorphAction, MorxSubtable,
- ExtendMode as _ExtendMode,
- CompositeMode as _CompositeMode,
- NO_VARIATION_INDEX)
+from .otBase import (
+ CountReference,
+ FormatSwitchingBaseTable,
+ OTTableReader,
+ OTTableWriter,
+ ValueRecordFactory,
+)
+from .otTables import (
+ lookupTypes,
+ AATStateTable,
+ AATState,
+ AATAction,
+ ContextualMorphAction,
+ LigatureMorphAction,
+ InsertionMorphAction,
+ MorxSubtable,
+ ExtendMode as _ExtendMode,
+ CompositeMode as _CompositeMode,
+ NO_VARIATION_INDEX,
+)
from itertools import zip_longest
from functools import partial
import re
@@ -30,947 +42,1059 @@ istuple = lambda t: isinstance(t, tuple)
def buildConverters(tableSpec, tableNamespace):
- """Given a table spec from otData.py, build a converter object for each
- field of the table. This is called for each table in otData.py, and
- the results are assigned to the corresponding class in otTables.py."""
- converters = []
- convertersByName = {}
- for tp, name, repeat, aux, descr in tableSpec:
- tableName = name
- if name.startswith("ValueFormat"):
- assert tp == "uint16"
- converterClass = ValueFormat
- elif name.endswith("Count") or name in ("StructLength", "MorphType"):
- converterClass = {
- "uint8": ComputedUInt8,
- "uint16": ComputedUShort,
- "uint32": ComputedULong,
- }[tp]
- elif name == "SubTable":
- converterClass = SubTable
- elif name == "ExtSubTable":
- converterClass = ExtSubTable
- elif name == "SubStruct":
- converterClass = SubStruct
- elif name == "FeatureParams":
- converterClass = FeatureParams
- elif name in ("CIDGlyphMapping", "GlyphCIDMapping"):
- converterClass = StructWithLength
- else:
- if not tp in converterMapping and '(' not in tp:
- tableName = tp
- converterClass = Struct
- else:
- converterClass = eval(tp, tableNamespace, converterMapping)
-
- conv = converterClass(name, repeat, aux, description=descr)
-
- if conv.tableClass:
- # A "template" such as OffsetTo(AType) knowss the table class already
- tableClass = conv.tableClass
- elif tp in ('MortChain', 'MortSubtable', 'MorxChain'):
- tableClass = tableNamespace.get(tp)
- else:
- tableClass = tableNamespace.get(tableName)
-
- if not conv.tableClass:
- conv.tableClass = tableClass
-
- if name in ["SubTable", "ExtSubTable", "SubStruct"]:
- conv.lookupTypes = tableNamespace['lookupTypes']
- # also create reverse mapping
- for t in conv.lookupTypes.values():
- for cls in t.values():
- convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
- if name == "FeatureParams":
- conv.featureParamTypes = tableNamespace['featureParamTypes']
- conv.defaultFeatureParams = tableNamespace['FeatureParams']
- for cls in conv.featureParamTypes.values():
- convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
- converters.append(conv)
- assert name not in convertersByName, name
- convertersByName[name] = conv
- return converters, convertersByName
+ """Given a table spec from otData.py, build a converter object for each
+ field of the table. This is called for each table in otData.py, and
+ the results are assigned to the corresponding class in otTables.py."""
+ converters = []
+ convertersByName = {}
+ for tp, name, repeat, aux, descr in tableSpec:
+ tableName = name
+ if name.startswith("ValueFormat"):
+ assert tp == "uint16"
+ converterClass = ValueFormat
+ elif name.endswith("Count") or name in ("StructLength", "MorphType"):
+ converterClass = {
+ "uint8": ComputedUInt8,
+ "uint16": ComputedUShort,
+ "uint32": ComputedULong,
+ }[tp]
+ elif name == "SubTable":
+ converterClass = SubTable
+ elif name == "ExtSubTable":
+ converterClass = ExtSubTable
+ elif name == "SubStruct":
+ converterClass = SubStruct
+ elif name == "FeatureParams":
+ converterClass = FeatureParams
+ elif name in ("CIDGlyphMapping", "GlyphCIDMapping"):
+ converterClass = StructWithLength
+ else:
+ if not tp in converterMapping and "(" not in tp:
+ tableName = tp
+ converterClass = Struct
+ else:
+ converterClass = eval(tp, tableNamespace, converterMapping)
+
+ conv = converterClass(name, repeat, aux, description=descr)
+
+ if conv.tableClass:
+ # A "template" such as OffsetTo(AType) knowss the table class already
+ tableClass = conv.tableClass
+ elif tp in ("MortChain", "MortSubtable", "MorxChain"):
+ tableClass = tableNamespace.get(tp)
+ else:
+ tableClass = tableNamespace.get(tableName)
+
+ if not conv.tableClass:
+ conv.tableClass = tableClass
+
+ if name in ["SubTable", "ExtSubTable", "SubStruct"]:
+ conv.lookupTypes = tableNamespace["lookupTypes"]
+ # also create reverse mapping
+ for t in conv.lookupTypes.values():
+ for cls in t.values():
+ convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
+ if name == "FeatureParams":
+ conv.featureParamTypes = tableNamespace["featureParamTypes"]
+ conv.defaultFeatureParams = tableNamespace["FeatureParams"]
+ for cls in conv.featureParamTypes.values():
+ convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
+ converters.append(conv)
+ assert name not in convertersByName, name
+ convertersByName[name] = conv
+ return converters, convertersByName
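
The converter selection in buildConverters() is driven purely by each field's type and name. The following is a simplified, self-contained sketch of that dispatch (not the real implementation); it mirrors only the first few branches for illustration.

def pick_converter(tp, name):
    # Mirrors the name-based branches of buildConverters above (simplified).
    if name.startswith("ValueFormat"):
        return "ValueFormat"
    if name.endswith("Count") or name in ("StructLength", "MorphType"):
        return {"uint8": "ComputedUInt8",
                "uint16": "ComputedUShort",
                "uint32": "ComputedULong"}[tp]
    if name in ("SubTable", "ExtSubTable", "SubStruct", "FeatureParams"):
        return name
    return "Struct (or a simple-type converter from converterMapping)"

assert pick_converter("uint16", "ValueFormat1") == "ValueFormat"
assert pick_converter("uint16", "GlyphCount") == "ComputedUShort"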
class _MissingItem(tuple):
- __slots__ = ()
+ __slots__ = ()
try:
- from collections import UserList
+ from collections import UserList
except ImportError:
- from UserList import UserList
+ from UserList import UserList
class _LazyList(UserList):
-
- def __getslice__(self, i, j):
- return self.__getitem__(slice(i, j))
-
- def __getitem__(self, k):
- if isinstance(k, slice):
- indices = range(*k.indices(len(self)))
- return [self[i] for i in indices]
- item = self.data[k]
- if isinstance(item, _MissingItem):
- self.reader.seek(self.pos + item[0] * self.recordSize)
- item = self.conv.read(self.reader, self.font, {})
- self.data[k] = item
- return item
-
- def __add__(self, other):
- if isinstance(other, _LazyList):
- other = list(other)
- elif isinstance(other, list):
- pass
- else:
- return NotImplemented
- return list(self) + other
-
- def __radd__(self, other):
- if not isinstance(other, list):
- return NotImplemented
- return other + list(self)
+ def __getslice__(self, i, j):
+ return self.__getitem__(slice(i, j))
+
+ def __getitem__(self, k):
+ if isinstance(k, slice):
+ indices = range(*k.indices(len(self)))
+ return [self[i] for i in indices]
+ item = self.data[k]
+ if isinstance(item, _MissingItem):
+ self.reader.seek(self.pos + item[0] * self.recordSize)
+ item = self.conv.read(self.reader, self.font, {})
+ self.data[k] = item
+ return item
+
+ def __add__(self, other):
+ if isinstance(other, _LazyList):
+ other = list(other)
+ elif isinstance(other, list):
+ pass
+ else:
+ return NotImplemented
+ return list(self) + other
+
+ def __radd__(self, other):
+ if not isinstance(other, list):
+ return NotImplemented
+ return other + list(self)
class BaseConverter(object):
- """Base class for converter objects. Apart from the constructor, this
- is an abstract class."""
-
- def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
- self.name = name
- self.repeat = repeat
- self.aux = aux
- self.tableClass = tableClass
- self.isCount = name.endswith("Count") or name in ['DesignAxisRecordSize', 'ValueRecordSize']
- self.isLookupType = name.endswith("LookupType") or name == "MorphType"
- self.isPropagated = name in [
- "ClassCount",
- "Class2Count",
- "FeatureTag",
- "SettingsCount",
- "VarRegionCount",
- "MappingCount",
- "RegionAxisCount",
- "DesignAxisCount",
- "DesignAxisRecordSize",
- "AxisValueCount",
- "ValueRecordSize",
- "AxisCount",
- "BaseGlyphRecordCount",
- "LayerRecordCount",
- ]
- self.description = description
-
- def readArray(self, reader, font, tableDict, count):
- """Read an array of values from the reader."""
- lazy = font.lazy and count > 8
- if lazy:
- recordSize = self.getRecordSize(reader)
- if recordSize is NotImplemented:
- lazy = False
- if not lazy:
- l = []
- for i in range(count):
- l.append(self.read(reader, font, tableDict))
- return l
- else:
- l = _LazyList()
- l.reader = reader.copy()
- l.pos = l.reader.pos
- l.font = font
- l.conv = self
- l.recordSize = recordSize
- l.extend(_MissingItem([i]) for i in range(count))
- reader.advance(count * recordSize)
- return l
-
- def getRecordSize(self, reader):
- if hasattr(self, 'staticSize'): return self.staticSize
- return NotImplemented
-
- def read(self, reader, font, tableDict):
- """Read a value from the reader."""
- raise NotImplementedError(self)
-
- def writeArray(self, writer, font, tableDict, values):
- try:
- for i, value in enumerate(values):
- self.write(writer, font, tableDict, value, i)
- except Exception as e:
- e.args = e.args + (i,)
- raise
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- """Write a value to the writer."""
- raise NotImplementedError(self)
-
- def xmlRead(self, attrs, content, font):
- """Read a value from XML."""
- raise NotImplementedError(self)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- """Write a value to XML."""
- raise NotImplementedError(self)
-
- varIndexBasePlusOffsetRE = re.compile(r"VarIndexBase\s*\+\s*(\d+)")
-
- def getVarIndexOffset(self) -> Optional[int]:
- """If description has `VarIndexBase + {offset}`, return the offset else None."""
- m = self.varIndexBasePlusOffsetRE.search(self.description)
- if not m:
- return None
- return int(m.group(1))
+ """Base class for converter objects. Apart from the constructor, this
+ is an abstract class."""
+
+ def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
+ self.name = name
+ self.repeat = repeat
+ self.aux = aux
+ self.tableClass = tableClass
+ self.isCount = name.endswith("Count") or name in [
+ "DesignAxisRecordSize",
+ "ValueRecordSize",
+ ]
+ self.isLookupType = name.endswith("LookupType") or name == "MorphType"
+ self.isPropagated = name in [
+ "ClassCount",
+ "Class2Count",
+ "FeatureTag",
+ "SettingsCount",
+ "VarRegionCount",
+ "MappingCount",
+ "RegionAxisCount",
+ "DesignAxisCount",
+ "DesignAxisRecordSize",
+ "AxisValueCount",
+ "ValueRecordSize",
+ "AxisCount",
+ "BaseGlyphRecordCount",
+ "LayerRecordCount",
+ ]
+ self.description = description
+
+ def readArray(self, reader, font, tableDict, count):
+ """Read an array of values from the reader."""
+ lazy = font.lazy and count > 8
+ if lazy:
+ recordSize = self.getRecordSize(reader)
+ if recordSize is NotImplemented:
+ lazy = False
+ if not lazy:
+ l = []
+ for i in range(count):
+ l.append(self.read(reader, font, tableDict))
+ return l
+ else:
+ l = _LazyList()
+ l.reader = reader.copy()
+ l.pos = l.reader.pos
+ l.font = font
+ l.conv = self
+ l.recordSize = recordSize
+ l.extend(_MissingItem([i]) for i in range(count))
+ reader.advance(count * recordSize)
+ return l
+
+ def getRecordSize(self, reader):
+ if hasattr(self, "staticSize"):
+ return self.staticSize
+ return NotImplemented
+
+ def read(self, reader, font, tableDict):
+ """Read a value from the reader."""
+ raise NotImplementedError(self)
+
+ def writeArray(self, writer, font, tableDict, values):
+ try:
+ for i, value in enumerate(values):
+ self.write(writer, font, tableDict, value, i)
+ except Exception as e:
+ e.args = e.args + (i,)
+ raise
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ """Write a value to the writer."""
+ raise NotImplementedError(self)
+
+ def xmlRead(self, attrs, content, font):
+ """Read a value from XML."""
+ raise NotImplementedError(self)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ """Write a value to XML."""
+ raise NotImplementedError(self)
+
+ varIndexBasePlusOffsetRE = re.compile(r"VarIndexBase\s*\+\s*(\d+)")
+
+ def getVarIndexOffset(self) -> Optional[int]:
+ """If description has `VarIndexBase + {offset}`, return the offset else None."""
+ m = self.varIndexBasePlusOffsetRE.search(self.description)
+ if not m:
+ return None
+ return int(m.group(1))
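# Illustrative sketch (not part of the patch): the minimum a concrete converter
# built on BaseConverter overrides is read()/write(); a fixed staticSize lets
# getRecordSize() report a record size, which readArray() needs for its lazy
# path. The class name below is hypothetical.
class ExampleUShortConverter(BaseConverter):
    staticSize = 2

    def read(self, reader, font, tableDict):
        return reader.readUShort()

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.writeUShort(value)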
class SimpleValue(BaseConverter):
- @staticmethod
- def toString(value):
- return value
- @staticmethod
- def fromString(value):
- return value
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", self.toString(value))])
- xmlWriter.newline()
- def xmlRead(self, attrs, content, font):
- return self.fromString(attrs["value"])
+ @staticmethod
+ def toString(value):
+ return value
+
+ @staticmethod
+ def fromString(value):
+ return value
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", self.toString(value))])
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ return self.fromString(attrs["value"])
+
class OptionalValue(SimpleValue):
- DEFAULT = None
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value != self.DEFAULT:
- attrs.append(("value", self.toString(value)))
- xmlWriter.simpletag(name, attrs)
- xmlWriter.newline()
- def xmlRead(self, attrs, content, font):
- if "value" in attrs:
- return self.fromString(attrs["value"])
- return self.DEFAULT
+ DEFAULT = None
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value != self.DEFAULT:
+ attrs.append(("value", self.toString(value)))
+ xmlWriter.simpletag(name, attrs)
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ if "value" in attrs:
+ return self.fromString(attrs["value"])
+ return self.DEFAULT
+
class IntValue(SimpleValue):
- @staticmethod
- def fromString(value):
- return int(value, 0)
+ @staticmethod
+ def fromString(value):
+ return int(value, 0)
+
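# Illustrative, not part of the patch: int(value, 0) auto-detects the base, so
# the XML side may carry either decimal or hex literals for integer fields:
#     IntValue.fromString("48") == 48
#     IntValue.fromString("0x30") == 48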
class Long(IntValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- return reader.readLong()
- def readArray(self, reader, font, tableDict, count):
- return reader.readLongArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeLong(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeLongArray(values)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ return reader.readLong()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readLongArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeLong(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeLongArray(values)
+
class ULong(IntValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- return reader.readULong()
- def readArray(self, reader, font, tableDict, count):
- return reader.readULongArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeULong(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeULongArray(values)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ return reader.readULong()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readULongArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeULong(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeULongArray(values)
+
class Flags32(ULong):
- @staticmethod
- def toString(value):
- return "0x%08X" % value
+ @staticmethod
+ def toString(value):
+ return "0x%08X" % value
+
class VarIndex(OptionalValue, ULong):
- DEFAULT = NO_VARIATION_INDEX
+ DEFAULT = NO_VARIATION_INDEX
+
class Short(IntValue):
- staticSize = 2
- def read(self, reader, font, tableDict):
- return reader.readShort()
- def readArray(self, reader, font, tableDict, count):
- return reader.readShortArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeShort(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeShortArray(values)
+ staticSize = 2
+
+ def read(self, reader, font, tableDict):
+ return reader.readShort()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readShortArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeShort(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeShortArray(values)
+
class UShort(IntValue):
- staticSize = 2
- def read(self, reader, font, tableDict):
- return reader.readUShort()
- def readArray(self, reader, font, tableDict, count):
- return reader.readUShortArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUShort(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeUShortArray(values)
+ staticSize = 2
+
+ def read(self, reader, font, tableDict):
+ return reader.readUShort()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readUShortArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUShort(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeUShortArray(values)
+
class Int8(IntValue):
- staticSize = 1
- def read(self, reader, font, tableDict):
- return reader.readInt8()
- def readArray(self, reader, font, tableDict, count):
- return reader.readInt8Array(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeInt8(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeInt8Array(values)
+ staticSize = 1
+
+ def read(self, reader, font, tableDict):
+ return reader.readInt8()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readInt8Array(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeInt8(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeInt8Array(values)
+
class UInt8(IntValue):
- staticSize = 1
- def read(self, reader, font, tableDict):
- return reader.readUInt8()
- def readArray(self, reader, font, tableDict, count):
- return reader.readUInt8Array(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUInt8(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeUInt8Array(values)
+ staticSize = 1
+
+ def read(self, reader, font, tableDict):
+ return reader.readUInt8()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readUInt8Array(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUInt8(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeUInt8Array(values)
+
class UInt24(IntValue):
- staticSize = 3
- def read(self, reader, font, tableDict):
- return reader.readUInt24()
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUInt24(value)
+ staticSize = 3
+
+ def read(self, reader, font, tableDict):
+ return reader.readUInt24()
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUInt24(value)
+
class ComputedInt(IntValue):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value is not None:
- xmlWriter.comment("%s=%s" % (name, value))
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value is not None:
+ xmlWriter.comment("%s=%s" % (name, value))
+ xmlWriter.newline()
+
class ComputedUInt8(ComputedInt, UInt8):
- pass
+ pass
+
+
class ComputedUShort(ComputedInt, UShort):
- pass
+ pass
+
+
class ComputedULong(ComputedInt, ULong):
- pass
+ pass
+
class Tag(SimpleValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- return reader.readTag()
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeTag(value)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ return reader.readTag()
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeTag(value)
+
class GlyphID(SimpleValue):
- staticSize = 2
- typecode = "H"
- def readArray(self, reader, font, tableDict, count):
- return font.getGlyphNameMany(reader.readArray(self.typecode, self.staticSize, count))
- def read(self, reader, font, tableDict):
- return font.getGlyphName(reader.readValue(self.typecode, self.staticSize))
- def writeArray(self, writer, font, tableDict, values):
- writer.writeArray(self.typecode, font.getGlyphIDMany(values))
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeValue(self.typecode, font.getGlyphID(value))
+ staticSize = 2
+ typecode = "H"
+
+ def readArray(self, reader, font, tableDict, count):
+ return font.getGlyphNameMany(
+ reader.readArray(self.typecode, self.staticSize, count)
+ )
+
+ def read(self, reader, font, tableDict):
+ return font.getGlyphName(reader.readValue(self.typecode, self.staticSize))
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeArray(self.typecode, font.getGlyphIDMany(values))
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeValue(self.typecode, font.getGlyphID(value))
class GlyphID32(GlyphID):
- staticSize = 4
- typecode = "L"
+ staticSize = 4
+ typecode = "L"
class NameID(UShort):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- if font and value:
- nameTable = font.get("name")
- if nameTable:
- name = nameTable.getDebugName(value)
- xmlWriter.write(" ")
- if name:
- xmlWriter.comment(name)
- else:
- xmlWriter.comment("missing from name table")
- log.warning("name id %d missing from name table" % value)
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ if font and value:
+ nameTable = font.get("name")
+ if nameTable:
+ name = nameTable.getDebugName(value)
+ xmlWriter.write(" ")
+ if name:
+ xmlWriter.comment(name)
+ else:
+ xmlWriter.comment("missing from name table")
+ log.warning("name id %d missing from name table" % value)
+ xmlWriter.newline()
+
class STATFlags(UShort):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- flags = []
- if value & 0x01:
- flags.append("OlderSiblingFontAttribute")
- if value & 0x02:
- flags.append("ElidableAxisValueName")
- if flags:
- xmlWriter.write(" ")
- xmlWriter.comment(" ".join(flags))
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ flags = []
+ if value & 0x01:
+ flags.append("OlderSiblingFontAttribute")
+ if value & 0x02:
+ flags.append("ElidableAxisValueName")
+ if flags:
+ xmlWriter.write(" ")
+ xmlWriter.comment(" ".join(flags))
+ xmlWriter.newline()
+
class FloatValue(SimpleValue):
- @staticmethod
- def fromString(value):
- return float(value)
+ @staticmethod
+ def fromString(value):
+ return float(value)
+
class DeciPoints(FloatValue):
- staticSize = 2
- def read(self, reader, font, tableDict):
- return reader.readUShort() / 10
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUShort(round(value * 10))
+ staticSize = 2
+
+ def read(self, reader, font, tableDict):
+ return reader.readUShort() / 10
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUShort(round(value * 10))
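# Worked example (illustrative, not part of the patch): DeciPoints keeps tenths
# of a point in a UShort, so 12.5 is stored as round(12.5 * 10) == 125 and read
# back as 125 / 10 == 12.5.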
class BaseFixedValue(FloatValue):
- staticSize = NotImplemented
- precisionBits = NotImplemented
- readerMethod = NotImplemented
- writerMethod = NotImplemented
- def read(self, reader, font, tableDict):
- return self.fromInt(getattr(reader, self.readerMethod)())
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- getattr(writer, self.writerMethod)(self.toInt(value))
- @classmethod
- def fromInt(cls, value):
- return fi2fl(value, cls.precisionBits)
- @classmethod
- def toInt(cls, value):
- return fl2fi(value, cls.precisionBits)
- @classmethod
- def fromString(cls, value):
- return str2fl(value, cls.precisionBits)
- @classmethod
- def toString(cls, value):
- return fl2str(value, cls.precisionBits)
+ staticSize = NotImplemented
+ precisionBits = NotImplemented
+ readerMethod = NotImplemented
+ writerMethod = NotImplemented
+
+ def read(self, reader, font, tableDict):
+ return self.fromInt(getattr(reader, self.readerMethod)())
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ getattr(writer, self.writerMethod)(self.toInt(value))
+
+ @classmethod
+ def fromInt(cls, value):
+ return fi2fl(value, cls.precisionBits)
+
+ @classmethod
+ def toInt(cls, value):
+ return fl2fi(value, cls.precisionBits)
+
+ @classmethod
+ def fromString(cls, value):
+ return str2fl(value, cls.precisionBits)
+
+ @classmethod
+ def toString(cls, value):
+ return fl2str(value, cls.precisionBits)
+
class Fixed(BaseFixedValue):
- staticSize = 4
- precisionBits = 16
- readerMethod = "readLong"
- writerMethod = "writeLong"
+ staticSize = 4
+ precisionBits = 16
+ readerMethod = "readLong"
+ writerMethod = "writeLong"
+
class F2Dot14(BaseFixedValue):
- staticSize = 2
- precisionBits = 14
- readerMethod = "readShort"
- writerMethod = "writeShort"
+ staticSize = 2
+ precisionBits = 14
+ readerMethod = "readShort"
+ writerMethod = "writeShort"
+
class Angle(F2Dot14):
- # angles are specified in degrees, and encoded as F2Dot14 fractions of half
- # circle: e.g. 1.0 => 180, -0.5 => -90, -2.0 => -360, etc.
- bias = 0.0
- factor = 1.0/(1<<14) * 180 # 0.010986328125
- @classmethod
- def fromInt(cls, value):
- return (super().fromInt(value) + cls.bias) * 180
- @classmethod
- def toInt(cls, value):
- return super().toInt((value / 180) - cls.bias)
- @classmethod
- def fromString(cls, value):
- # quantize to nearest multiples of minimum fixed-precision angle
- return otRound(float(value) / cls.factor) * cls.factor
- @classmethod
- def toString(cls, value):
- return nearestMultipleShortestRepr(value, cls.factor)
+ # angles are specified in degrees, and encoded as F2Dot14 fractions of half
+ # circle: e.g. 1.0 => 180, -0.5 => -90, -2.0 => -360, etc.
+ bias = 0.0
+ factor = 1.0 / (1 << 14) * 180 # 0.010986328125
+
+ @classmethod
+ def fromInt(cls, value):
+ return (super().fromInt(value) + cls.bias) * 180
+
+ @classmethod
+ def toInt(cls, value):
+ return super().toInt((value / 180) - cls.bias)
+
+ @classmethod
+ def fromString(cls, value):
+ # quantize to nearest multiples of minimum fixed-precision angle
+ return otRound(float(value) / cls.factor) * cls.factor
+
+ @classmethod
+ def toString(cls, value):
+ return nearestMultipleShortestRepr(value, cls.factor)
+
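# Worked example (illustrative, not part of the patch) of the mapping above:
#     Angle.toInt(90)      == 8192   # 90 / 180 = 0.5 half-circles -> 0.5 * 2**14
#     Angle.fromInt(-8192) == -90.0  # -0.5 half-circles -> -90 degrees
# The smallest representable step is factor = 180 / 2**14, about 0.011 degrees.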
class BiasedAngle(Angle):
- # A bias of 1.0 is used in the representation of start and end angles
- # of COLRv1 PaintSweepGradients to allow for encoding +360deg
- bias = 1.0
+ # A bias of 1.0 is used in the representation of start and end angles
+ # of COLRv1 PaintSweepGradients to allow for encoding +360deg
+ bias = 1.0
+
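# Worked example (illustrative, not part of the patch): with bias = 1.0 the
# stored F2Dot14 can reach +360 degrees:
#     BiasedAngle.toInt(360) == 16384   # 360/180 - 1.0 = 1.0 -> 1.0 * 2**14
#     BiasedAngle.fromInt(0) == 180.0   # a stored 0 decodes to 180 degrees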
class Version(SimpleValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- value = reader.readLong()
- assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
- return value
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- value = fi2ve(value)
- assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
- writer.writeLong(value)
- @staticmethod
- def fromString(value):
- return ve2fi(value)
- @staticmethod
- def toString(value):
- return "0x%08x" % value
- @staticmethod
- def fromFloat(v):
- return fl2fi(v, 16)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ value = reader.readLong()
+ return value
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ value = fi2ve(value)
+ writer.writeLong(value)
+
+ @staticmethod
+ def fromString(value):
+ return ve2fi(value)
+
+ @staticmethod
+ def toString(value):
+ return "0x%08x" % value
+
+ @staticmethod
+ def fromFloat(v):
+ return fl2fi(v, 16)
class Char64(SimpleValue):
- """An ASCII string with up to 64 characters.
-
- Unused character positions are filled with 0x00 bytes.
- Used in Apple AAT fonts in the `gcid` table.
- """
- staticSize = 64
-
- def read(self, reader, font, tableDict):
- data = reader.readData(self.staticSize)
- zeroPos = data.find(b"\0")
- if zeroPos >= 0:
- data = data[:zeroPos]
- s = tostr(data, encoding="ascii", errors="replace")
- if s != tostr(data, encoding="ascii", errors="ignore"):
- log.warning('replaced non-ASCII characters in "%s"' %
- s)
- return s
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- data = tobytes(value, encoding="ascii", errors="replace")
- if data != tobytes(value, encoding="ascii", errors="ignore"):
- log.warning('replacing non-ASCII characters in "%s"' %
- value)
- if len(data) > self.staticSize:
- log.warning('truncating overlong "%s" to %d bytes' %
- (value, self.staticSize))
- data = (data + b"\0" * self.staticSize)[:self.staticSize]
- writer.writeData(data)
+ """An ASCII string with up to 64 characters.
+
+ Unused character positions are filled with 0x00 bytes.
+ Used in Apple AAT fonts in the `gcid` table.
+ """
+
+ staticSize = 64
+
+ def read(self, reader, font, tableDict):
+ data = reader.readData(self.staticSize)
+ zeroPos = data.find(b"\0")
+ if zeroPos >= 0:
+ data = data[:zeroPos]
+ s = tostr(data, encoding="ascii", errors="replace")
+ if s != tostr(data, encoding="ascii", errors="ignore"):
+ log.warning('replaced non-ASCII characters in "%s"' % s)
+ return s
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ data = tobytes(value, encoding="ascii", errors="replace")
+ if data != tobytes(value, encoding="ascii", errors="ignore"):
+ log.warning('replacing non-ASCII characters in "%s"' % value)
+ if len(data) > self.staticSize:
+ log.warning(
+ 'truncating overlong "%s" to %d bytes' % (value, self.staticSize)
+ )
+ data = (data + b"\0" * self.staticSize)[: self.staticSize]
+ writer.writeData(data)
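# Illustrative round-trip (not part of the patch) of the padding/truncation
# rules documented above:
#     write("abc")                 -> b"abc" + b"\x00" * 61
#     read(b"abc" + b"\x00" * 61)  -> "abc"
# Overlong input is truncated and non-ASCII characters are replaced, each with
# a warning.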
class Struct(BaseConverter):
-
- def getRecordSize(self, reader):
- return self.tableClass and self.tableClass.getRecordSize(reader)
-
- def read(self, reader, font, tableDict):
- table = self.tableClass()
- table.decompile(reader, font)
- return table
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- value.compile(writer, font)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value is None:
- if attrs:
- # If there are attributes (probably index), then
- # don't drop this even if it's NULL. It will mess
- # up the array indices of the containing element.
- xmlWriter.simpletag(name, attrs + [("empty", 1)])
- xmlWriter.newline()
- else:
- pass # NULL table, ignore
- else:
- value.toXML(xmlWriter, font, attrs, name=name)
-
- def xmlRead(self, attrs, content, font):
- if "empty" in attrs and safeEval(attrs["empty"]):
- return None
- table = self.tableClass()
- Format = attrs.get("Format")
- if Format is not None:
- table.Format = int(Format)
-
- noPostRead = not hasattr(table, 'postRead')
- if noPostRead:
- # TODO Cache table.hasPropagated.
- cleanPropagation = False
- for conv in table.getConverters():
- if conv.isPropagated:
- cleanPropagation = True
- if not hasattr(font, '_propagator'):
- font._propagator = {}
- propagator = font._propagator
- assert conv.name not in propagator, (conv.name, propagator)
- setattr(table, conv.name, None)
- propagator[conv.name] = CountReference(table.__dict__, conv.name)
-
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- table.fromXML(name, attrs, content, font)
- else:
- pass
-
- table.populateDefaults(propagator=getattr(font, '_propagator', None))
-
- if noPostRead:
- if cleanPropagation:
- for conv in table.getConverters():
- if conv.isPropagated:
- propagator = font._propagator
- del propagator[conv.name]
- if not propagator:
- del font._propagator
-
- return table
-
- def __repr__(self):
- return "Struct of " + repr(self.tableClass)
+ def getRecordSize(self, reader):
+ return self.tableClass and self.tableClass.getRecordSize(reader)
+
+ def read(self, reader, font, tableDict):
+ table = self.tableClass()
+ table.decompile(reader, font)
+ return table
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ value.compile(writer, font)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value is None:
+ if attrs:
+ # If there are attributes (probably index), then
+ # don't drop this even if it's NULL. It will mess
+ # up the array indices of the containing element.
+ xmlWriter.simpletag(name, attrs + [("empty", 1)])
+ xmlWriter.newline()
+ else:
+ pass # NULL table, ignore
+ else:
+ value.toXML(xmlWriter, font, attrs, name=name)
+
+ def xmlRead(self, attrs, content, font):
+ if "empty" in attrs and safeEval(attrs["empty"]):
+ return None
+ table = self.tableClass()
+ Format = attrs.get("Format")
+ if Format is not None:
+ table.Format = int(Format)
+
+ noPostRead = not hasattr(table, "postRead")
+ if noPostRead:
+ # TODO Cache table.hasPropagated.
+ cleanPropagation = False
+ for conv in table.getConverters():
+ if conv.isPropagated:
+ cleanPropagation = True
+ if not hasattr(font, "_propagator"):
+ font._propagator = {}
+ propagator = font._propagator
+ assert conv.name not in propagator, (conv.name, propagator)
+ setattr(table, conv.name, None)
+ propagator[conv.name] = CountReference(table.__dict__, conv.name)
+
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ table.fromXML(name, attrs, content, font)
+ else:
+ pass
+
+ table.populateDefaults(propagator=getattr(font, "_propagator", None))
+
+ if noPostRead:
+ if cleanPropagation:
+ for conv in table.getConverters():
+ if conv.isPropagated:
+ propagator = font._propagator
+ del propagator[conv.name]
+ if not propagator:
+ del font._propagator
+
+ return table
+
+ def __repr__(self):
+ return "Struct of " + repr(self.tableClass)
class StructWithLength(Struct):
- def read(self, reader, font, tableDict):
- pos = reader.pos
- table = self.tableClass()
- table.decompile(reader, font)
- reader.seek(pos + table.StructLength)
- return table
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- for convIndex, conv in enumerate(value.getConverters()):
- if conv.name == "StructLength":
- break
- lengthIndex = len(writer.items) + convIndex
- if isinstance(value, FormatSwitchingBaseTable):
- lengthIndex += 1 # implicit Format field
- deadbeef = {1:0xDE, 2:0xDEAD, 4:0xDEADBEEF}[conv.staticSize]
-
- before = writer.getDataLength()
- value.StructLength = deadbeef
- value.compile(writer, font)
- length = writer.getDataLength() - before
- lengthWriter = writer.getSubWriter()
- conv.write(lengthWriter, font, tableDict, length)
- assert(writer.items[lengthIndex] ==
- b"\xde\xad\xbe\xef"[:conv.staticSize])
- writer.items[lengthIndex] = lengthWriter.getAllData()
+ def read(self, reader, font, tableDict):
+ pos = reader.pos
+ table = self.tableClass()
+ table.decompile(reader, font)
+ reader.seek(pos + table.StructLength)
+ return table
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ for convIndex, conv in enumerate(value.getConverters()):
+ if conv.name == "StructLength":
+ break
+ lengthIndex = len(writer.items) + convIndex
+ if isinstance(value, FormatSwitchingBaseTable):
+ lengthIndex += 1 # implicit Format field
+ deadbeef = {1: 0xDE, 2: 0xDEAD, 4: 0xDEADBEEF}[conv.staticSize]
+
+ before = writer.getDataLength()
+ value.StructLength = deadbeef
+ value.compile(writer, font)
+ length = writer.getDataLength() - before
+ lengthWriter = writer.getSubWriter()
+ conv.write(lengthWriter, font, tableDict, length)
+ assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"[: conv.staticSize]
+ writer.items[lengthIndex] = lengthWriter.getAllData()
class Table(Struct):
- staticSize = 2
-
- def readOffset(self, reader):
- return reader.readUShort()
-
- def writeNullOffset(self, writer):
- writer.writeUShort(0)
-
- def read(self, reader, font, tableDict):
- offset = self.readOffset(reader)
- if offset == 0:
- return None
- table = self.tableClass()
- reader = reader.getSubReader(offset)
- if font.lazy:
- table.reader = reader
- table.font = font
- else:
- table.decompile(reader, font)
- return table
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- if value is None:
- self.writeNullOffset(writer)
- else:
- subWriter = writer.getSubWriter(offsetSize=self.staticSize)
- subWriter.name = self.name
- if repeatIndex is not None:
- subWriter.repeatIndex = repeatIndex
- writer.writeSubTable(subWriter)
- value.compile(subWriter, font)
+ staticSize = 2
+
+ def readOffset(self, reader):
+ return reader.readUShort()
+
+ def writeNullOffset(self, writer):
+ writer.writeUShort(0)
+
+ def read(self, reader, font, tableDict):
+ offset = self.readOffset(reader)
+ if offset == 0:
+ return None
+ table = self.tableClass()
+ reader = reader.getSubReader(offset)
+ if font.lazy:
+ table.reader = reader
+ table.font = font
+ else:
+ table.decompile(reader, font)
+ return table
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ if value is None:
+ self.writeNullOffset(writer)
+ else:
+ subWriter = writer.getSubWriter()
+ subWriter.name = self.name
+ if repeatIndex is not None:
+ subWriter.repeatIndex = repeatIndex
+ writer.writeSubTable(subWriter, offsetSize=self.staticSize)
+ value.compile(subWriter, font)
class LTable(Table):
- staticSize = 4
- def readOffset(self, reader):
- return reader.readULong()
-
- def writeNullOffset(self, writer):
- writer.writeULong(0)
+ staticSize = 4
+ def readOffset(self, reader):
+ return reader.readULong()
+ def writeNullOffset(self, writer):
+ writer.writeULong(0)
# Table pointed to by a 24-bit, 3-byte long offset
class Table24(Table):
- staticSize = 3
-
- def readOffset(self, reader):
- return reader.readUInt24()
- def writeNullOffset(self, writer):
- writer.writeUInt24(0)
+ staticSize = 3
+ def readOffset(self, reader):
+ return reader.readUInt24()
+ def writeNullOffset(self, writer):
+ writer.writeUInt24(0)
# TODO Clean / merge the SubTable and SubStruct
+
class SubStruct(Struct):
- def getConverter(self, tableType, lookupType):
- tableClass = self.lookupTypes[tableType][lookupType]
- return self.__class__(self.name, self.repeat, self.aux, tableClass)
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs)
+ def getConverter(self, tableType, lookupType):
+ tableClass = self.lookupTypes[tableType][lookupType]
+ return self.__class__(self.name, self.repeat, self.aux, tableClass)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs)
class SubTable(Table):
- def getConverter(self, tableType, lookupType):
- tableClass = self.lookupTypes[tableType][lookupType]
- return self.__class__(self.name, self.repeat, self.aux, tableClass)
+ def getConverter(self, tableType, lookupType):
+ tableClass = self.lookupTypes[tableType][lookupType]
+ return self.__class__(self.name, self.repeat, self.aux, tableClass)
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs)
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs)
-class ExtSubTable(LTable, SubTable):
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer.
- Table.write(self, writer, font, tableDict, value, repeatIndex)
+class ExtSubTable(LTable, SubTable):
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer.
+ Table.write(self, writer, font, tableDict, value, repeatIndex)
class FeatureParams(Table):
- def getConverter(self, featureTag):
- tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
- return self.__class__(self.name, self.repeat, self.aux, tableClass)
+ def getConverter(self, featureTag):
+ tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
+ return self.__class__(self.name, self.repeat, self.aux, tableClass)
class ValueFormat(IntValue):
- staticSize = 2
- def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
- self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
- def read(self, reader, font, tableDict):
- format = reader.readUShort()
- reader[self.which] = ValueRecordFactory(format)
- return format
- def write(self, writer, font, tableDict, format, repeatIndex=None):
- writer.writeUShort(format)
- writer[self.which] = ValueRecordFactory(format)
+ staticSize = 2
+
+ def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+ self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
+
+ def read(self, reader, font, tableDict):
+ format = reader.readUShort()
+ reader[self.which] = ValueRecordFactory(format)
+ return format
+
+ def write(self, writer, font, tableDict, format, repeatIndex=None):
+ writer.writeUShort(format)
+ writer[self.which] = ValueRecordFactory(format)
class ValueRecord(ValueFormat):
- def getRecordSize(self, reader):
- return 2 * len(reader[self.which])
- def read(self, reader, font, tableDict):
- return reader[self.which].readValueRecord(reader, font)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer[self.which].writeValueRecord(writer, font, value)
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value is None:
- pass # NULL table, ignore
- else:
- value.toXML(xmlWriter, font, self.name, attrs)
- def xmlRead(self, attrs, content, font):
- from .otBase import ValueRecord
- value = ValueRecord()
- value.fromXML(None, attrs, content, font)
- return value
+ def getRecordSize(self, reader):
+ return 2 * len(reader[self.which])
+
+ def read(self, reader, font, tableDict):
+ return reader[self.which].readValueRecord(reader, font)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer[self.which].writeValueRecord(writer, font, value)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value is None:
+ pass # NULL table, ignore
+ else:
+ value.toXML(xmlWriter, font, self.name, attrs)
+
+ def xmlRead(self, attrs, content, font):
+ from .otBase import ValueRecord
+
+ value = ValueRecord()
+ value.fromXML(None, attrs, content, font)
+ return value
class AATLookup(BaseConverter):
- BIN_SEARCH_HEADER_SIZE = 10
-
- def __init__(self, name, repeat, aux, tableClass, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
- if issubclass(self.tableClass, SimpleValue):
- self.converter = self.tableClass(name='Value', repeat=None, aux=None)
- else:
- self.converter = Table(name='Value', repeat=None, aux=None, tableClass=self.tableClass)
-
- def read(self, reader, font, tableDict):
- format = reader.readUShort()
- if format == 0:
- return self.readFormat0(reader, font)
- elif format == 2:
- return self.readFormat2(reader, font)
- elif format == 4:
- return self.readFormat4(reader, font)
- elif format == 6:
- return self.readFormat6(reader, font)
- elif format == 8:
- return self.readFormat8(reader, font)
- else:
- assert False, "unsupported lookup format: %d" % format
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- values = list(sorted([(font.getGlyphID(glyph), val)
- for glyph, val in value.items()]))
- # TODO: Also implement format 4.
- formats = list(sorted(filter(None, [
- self.buildFormat0(writer, font, values),
- self.buildFormat2(writer, font, values),
- self.buildFormat6(writer, font, values),
- self.buildFormat8(writer, font, values),
- ])))
- # We use the format ID as secondary sort key to make the output
- # deterministic when multiple formats have same encoded size.
- dataSize, lookupFormat, writeMethod = formats[0]
- pos = writer.getDataLength()
- writeMethod()
- actualSize = writer.getDataLength() - pos
- assert actualSize == dataSize, (
- "AATLookup format %d claimed to write %d bytes, but wrote %d" %
- (lookupFormat, dataSize, actualSize))
-
- @staticmethod
- def writeBinSearchHeader(writer, numUnits, unitSize):
- writer.writeUShort(unitSize)
- writer.writeUShort(numUnits)
- searchRange, entrySelector, rangeShift = \
- getSearchRange(n=numUnits, itemSize=unitSize)
- writer.writeUShort(searchRange)
- writer.writeUShort(entrySelector)
- writer.writeUShort(rangeShift)
-
- def buildFormat0(self, writer, font, values):
- numGlyphs = len(font.getGlyphOrder())
- if len(values) != numGlyphs:
- return None
- valueSize = self.converter.staticSize
- return (2 + numGlyphs * valueSize, 0,
- lambda: self.writeFormat0(writer, font, values))
-
- def writeFormat0(self, writer, font, values):
- writer.writeUShort(0)
- for glyphID_, value in values:
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
-
- def buildFormat2(self, writer, font, values):
- segStart, segValue = values[0]
- segEnd = segStart
- segments = []
- for glyphID, curValue in values[1:]:
- if glyphID != segEnd + 1 or curValue != segValue:
- segments.append((segStart, segEnd, segValue))
- segStart = segEnd = glyphID
- segValue = curValue
- else:
- segEnd = glyphID
- segments.append((segStart, segEnd, segValue))
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(segments) + 1, valueSize + 4
- return (2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, 2,
- lambda: self.writeFormat2(writer, font, segments))
-
- def writeFormat2(self, writer, font, segments):
- writer.writeUShort(2)
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(segments), valueSize + 4
- self.writeBinSearchHeader(writer, numUnits, unitSize)
- for firstGlyph, lastGlyph, value in segments:
- writer.writeUShort(lastGlyph)
- writer.writeUShort(firstGlyph)
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
- writer.writeUShort(0xFFFF)
- writer.writeUShort(0xFFFF)
- writer.writeData(b'\x00' * valueSize)
-
- def buildFormat6(self, writer, font, values):
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(values), valueSize + 2
- return (2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, 6,
- lambda: self.writeFormat6(writer, font, values))
-
- def writeFormat6(self, writer, font, values):
- writer.writeUShort(6)
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(values), valueSize + 2
- self.writeBinSearchHeader(writer, numUnits, unitSize)
- for glyphID, value in values:
- writer.writeUShort(glyphID)
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
- writer.writeUShort(0xFFFF)
- writer.writeData(b'\x00' * valueSize)
-
- def buildFormat8(self, writer, font, values):
- minGlyphID, maxGlyphID = values[0][0], values[-1][0]
- if len(values) != maxGlyphID - minGlyphID + 1:
- return None
- valueSize = self.converter.staticSize
- return (6 + len(values) * valueSize, 8,
- lambda: self.writeFormat8(writer, font, values))
-
- def writeFormat8(self, writer, font, values):
- firstGlyphID = values[0][0]
- writer.writeUShort(8)
- writer.writeUShort(firstGlyphID)
- writer.writeUShort(len(values))
- for _, value in values:
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
-
- def readFormat0(self, reader, font):
- numGlyphs = len(font.getGlyphOrder())
- data = self.converter.readArray(
- reader, font, tableDict=None, count=numGlyphs)
- return {font.getGlyphName(k): value
- for k, value in enumerate(data)}
-
- def readFormat2(self, reader, font):
- mapping = {}
- pos = reader.pos - 2 # start of table is at UShort for format
- unitSize, numUnits = reader.readUShort(), reader.readUShort()
- assert unitSize >= 4 + self.converter.staticSize, unitSize
- for i in range(numUnits):
- reader.seek(pos + i * unitSize + 12)
- last = reader.readUShort()
- first = reader.readUShort()
- value = self.converter.read(reader, font, tableDict=None)
- if last != 0xFFFF:
- for k in range(first, last + 1):
- mapping[font.getGlyphName(k)] = value
- return mapping
-
- def readFormat4(self, reader, font):
- mapping = {}
- pos = reader.pos - 2 # start of table is at UShort for format
- unitSize = reader.readUShort()
- assert unitSize >= 6, unitSize
- for i in range(reader.readUShort()):
- reader.seek(pos + i * unitSize + 12)
- last = reader.readUShort()
- first = reader.readUShort()
- offset = reader.readUShort()
- if last != 0xFFFF:
- dataReader = reader.getSubReader(0) # relative to current position
- dataReader.seek(pos + offset) # relative to start of table
- data = self.converter.readArray(
- dataReader, font, tableDict=None,
- count=last - first + 1)
- for k, v in enumerate(data):
- mapping[font.getGlyphName(first + k)] = v
- return mapping
-
- def readFormat6(self, reader, font):
- mapping = {}
- pos = reader.pos - 2 # start of table is at UShort for format
- unitSize = reader.readUShort()
- assert unitSize >= 2 + self.converter.staticSize, unitSize
- for i in range(reader.readUShort()):
- reader.seek(pos + i * unitSize + 12)
- glyphID = reader.readUShort()
- value = self.converter.read(
- reader, font, tableDict=None)
- if glyphID != 0xFFFF:
- mapping[font.getGlyphName(glyphID)] = value
- return mapping
-
- def readFormat8(self, reader, font):
- first = reader.readUShort()
- count = reader.readUShort()
- data = self.converter.readArray(
- reader, font, tableDict=None, count=count)
- return {font.getGlyphName(first + k): value
- for (k, value) in enumerate(data)}
-
- def xmlRead(self, attrs, content, font):
- value = {}
- for element in content:
- if isinstance(element, tuple):
- name, a, eltContent = element
- if name == "Lookup":
- value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font)
- return value
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- for glyph, value in sorted(value.items()):
- self.converter.xmlWrite(
- xmlWriter, font, value=value,
- name="Lookup", attrs=[("glyph", glyph)])
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ BIN_SEARCH_HEADER_SIZE = 10
+
+ def __init__(self, name, repeat, aux, tableClass, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+ if issubclass(self.tableClass, SimpleValue):
+ self.converter = self.tableClass(name="Value", repeat=None, aux=None)
+ else:
+ self.converter = Table(
+ name="Value", repeat=None, aux=None, tableClass=self.tableClass
+ )
+
+ def read(self, reader, font, tableDict):
+ format = reader.readUShort()
+ if format == 0:
+ return self.readFormat0(reader, font)
+ elif format == 2:
+ return self.readFormat2(reader, font)
+ elif format == 4:
+ return self.readFormat4(reader, font)
+ elif format == 6:
+ return self.readFormat6(reader, font)
+ elif format == 8:
+ return self.readFormat8(reader, font)
+ else:
+ assert False, "unsupported lookup format: %d" % format
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ values = list(
+ sorted([(font.getGlyphID(glyph), val) for glyph, val in value.items()])
+ )
+ # TODO: Also implement format 4.
+ formats = list(
+ sorted(
+ filter(
+ None,
+ [
+ self.buildFormat0(writer, font, values),
+ self.buildFormat2(writer, font, values),
+ self.buildFormat6(writer, font, values),
+ self.buildFormat8(writer, font, values),
+ ],
+ )
+ )
+ )
+ # We use the format ID as secondary sort key to make the output
+ # deterministic when multiple formats have the same encoded size.
+ dataSize, lookupFormat, writeMethod = formats[0]
+ pos = writer.getDataLength()
+ writeMethod()
+ actualSize = writer.getDataLength() - pos
+ assert (
+ actualSize == dataSize
+ ), "AATLookup format %d claimed to write %d bytes, but wrote %d" % (
+ lookupFormat,
+ dataSize,
+ actualSize,
+ )
+
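# Illustrative, not part of the patch: each buildFormatN() helper further down
# returns a (dataSize, lookupFormat, writeMethod) tuple, so the sort above picks
# the smallest encoding and breaks ties on the lower format number, e.g.
#     candidates = [(410, 6, w6), (396, 2, w2), (396, 0, w0)]
#     sorted(candidates)[0] == (396, 0, w0)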
+ @staticmethod
+ def writeBinSearchHeader(writer, numUnits, unitSize):
+ writer.writeUShort(unitSize)
+ writer.writeUShort(numUnits)
+ searchRange, entrySelector, rangeShift = getSearchRange(
+ n=numUnits, itemSize=unitSize
+ )
+ writer.writeUShort(searchRange)
+ writer.writeUShort(entrySelector)
+ writer.writeUShort(rangeShift)
+
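# Worked numbers (illustrative, not part of the patch), assuming the usual sfnt
# definition of getSearchRange(); for numUnits=5, unitSize=6:
#     entrySelector = floor(log2(5))    = 2
#     searchRange   = unitSize * 2**2   = 24
#     rangeShift    = 5 * unitSize - 24 = 6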
+ def buildFormat0(self, writer, font, values):
+ numGlyphs = len(font.getGlyphOrder())
+ if len(values) != numGlyphs:
+ return None
+ valueSize = self.converter.staticSize
+ return (
+ 2 + numGlyphs * valueSize,
+ 0,
+ lambda: self.writeFormat0(writer, font, values),
+ )
+
+ def writeFormat0(self, writer, font, values):
+ writer.writeUShort(0)
+ for glyphID_, value in values:
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+
+ def buildFormat2(self, writer, font, values):
+ segStart, segValue = values[0]
+ segEnd = segStart
+ segments = []
+ for glyphID, curValue in values[1:]:
+ if glyphID != segEnd + 1 or curValue != segValue:
+ segments.append((segStart, segEnd, segValue))
+ segStart = segEnd = glyphID
+ segValue = curValue
+ else:
+ segEnd = glyphID
+ segments.append((segStart, segEnd, segValue))
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(segments) + 1, valueSize + 4
+ return (
+ 2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize,
+ 2,
+ lambda: self.writeFormat2(writer, font, segments),
+ )
+
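# Illustrative run (not part of the patch) of the segmentation loop above: runs
# of consecutive glyph IDs with equal values collapse into (first, last, value):
#     values   = [(1, "A"), (2, "A"), (3, "B"), (5, "B")]
#     segments = [(1, 2, "A"), (3, 3, "B"), (5, 5, "B")]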
+ def writeFormat2(self, writer, font, segments):
+ writer.writeUShort(2)
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(segments), valueSize + 4
+ self.writeBinSearchHeader(writer, numUnits, unitSize)
+ for firstGlyph, lastGlyph, value in segments:
+ writer.writeUShort(lastGlyph)
+ writer.writeUShort(firstGlyph)
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+ writer.writeUShort(0xFFFF)
+ writer.writeUShort(0xFFFF)
+ writer.writeData(b"\x00" * valueSize)
+
+ def buildFormat6(self, writer, font, values):
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(values), valueSize + 2
+ return (
+ 2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize,
+ 6,
+ lambda: self.writeFormat6(writer, font, values),
+ )
+
+ def writeFormat6(self, writer, font, values):
+ writer.writeUShort(6)
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(values), valueSize + 2
+ self.writeBinSearchHeader(writer, numUnits, unitSize)
+ for glyphID, value in values:
+ writer.writeUShort(glyphID)
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+ writer.writeUShort(0xFFFF)
+ writer.writeData(b"\x00" * valueSize)
+
+ def buildFormat8(self, writer, font, values):
+ minGlyphID, maxGlyphID = values[0][0], values[-1][0]
+ if len(values) != maxGlyphID - minGlyphID + 1:
+ return None
+ valueSize = self.converter.staticSize
+ return (
+ 6 + len(values) * valueSize,
+ 8,
+ lambda: self.writeFormat8(writer, font, values),
+ )
+
+ def writeFormat8(self, writer, font, values):
+ firstGlyphID = values[0][0]
+ writer.writeUShort(8)
+ writer.writeUShort(firstGlyphID)
+ writer.writeUShort(len(values))
+ for _, value in values:
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+
+ def readFormat0(self, reader, font):
+ numGlyphs = len(font.getGlyphOrder())
+ data = self.converter.readArray(reader, font, tableDict=None, count=numGlyphs)
+ return {font.getGlyphName(k): value for k, value in enumerate(data)}
+
+ def readFormat2(self, reader, font):
+ mapping = {}
+ pos = reader.pos - 2 # start of table is at UShort for format
+ unitSize, numUnits = reader.readUShort(), reader.readUShort()
+ assert unitSize >= 4 + self.converter.staticSize, unitSize
+ for i in range(numUnits):
+ reader.seek(pos + i * unitSize + 12)
+ last = reader.readUShort()
+ first = reader.readUShort()
+ value = self.converter.read(reader, font, tableDict=None)
+ if last != 0xFFFF:
+ for k in range(first, last + 1):
+ mapping[font.getGlyphName(k)] = value
+ return mapping
+
+ def readFormat4(self, reader, font):
+ mapping = {}
+ pos = reader.pos - 2 # start of table is at UShort for format
+ unitSize = reader.readUShort()
+ assert unitSize >= 6, unitSize
+ for i in range(reader.readUShort()):
+ reader.seek(pos + i * unitSize + 12)
+ last = reader.readUShort()
+ first = reader.readUShort()
+ offset = reader.readUShort()
+ if last != 0xFFFF:
+ dataReader = reader.getSubReader(0) # relative to current position
+ dataReader.seek(pos + offset) # relative to start of table
+ data = self.converter.readArray(
+ dataReader, font, tableDict=None, count=last - first + 1
+ )
+ for k, v in enumerate(data):
+ mapping[font.getGlyphName(first + k)] = v
+ return mapping
+
+ def readFormat6(self, reader, font):
+ mapping = {}
+ pos = reader.pos - 2 # start of table is at UShort for format
+ unitSize = reader.readUShort()
+ assert unitSize >= 2 + self.converter.staticSize, unitSize
+ for i in range(reader.readUShort()):
+ reader.seek(pos + i * unitSize + 12)
+ glyphID = reader.readUShort()
+ value = self.converter.read(reader, font, tableDict=None)
+ if glyphID != 0xFFFF:
+ mapping[font.getGlyphName(glyphID)] = value
+ return mapping
+
+ def readFormat8(self, reader, font):
+ first = reader.readUShort()
+ count = reader.readUShort()
+ data = self.converter.readArray(reader, font, tableDict=None, count=count)
+ return {font.getGlyphName(first + k): value for (k, value) in enumerate(data)}
+
+ def xmlRead(self, attrs, content, font):
+ value = {}
+ for element in content:
+ if isinstance(element, tuple):
+ name, a, eltContent = element
+ if name == "Lookup":
+ value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font)
+ return value
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ for glyph, value in sorted(value.items()):
+ self.converter.xmlWrite(
+ xmlWriter, font, value=value, name="Lookup", attrs=[("glyph", glyph)]
+ )
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
# The AAT 'ankr' table has an unusual structure: An offset to an AATLookup
@@ -981,831 +1105,822 @@ class AATLookup(BaseConverter):
# to the data table to the offset found in the AATLookup, and then use
# the sum of these two offsets to find the actual data.
class AATLookupWithDataOffset(BaseConverter):
- def read(self, reader, font, tableDict):
- lookupOffset = reader.readULong()
- dataOffset = reader.readULong()
- lookupReader = reader.getSubReader(lookupOffset)
- lookup = AATLookup('DataOffsets', None, None, UShort)
- offsets = lookup.read(lookupReader, font, tableDict)
- result = {}
- for glyph, offset in offsets.items():
- dataReader = reader.getSubReader(offset + dataOffset)
- item = self.tableClass()
- item.decompile(dataReader, font)
- result[glyph] = item
- return result
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- # We do not work with OTTableWriter sub-writers because
- # the offsets in our AATLookup are relative to our data
- # table, for which we need to provide an offset value itself.
- # It might have been possible to somehow make a kludge for
- # performing this indirect offset computation directly inside
- # OTTableWriter. But this would have made the internal logic
- # of OTTableWriter even more complex than it already is,
- # so we decided to roll our own offset computation for the
- # contents of the AATLookup and associated data table.
- offsetByGlyph, offsetByData, dataLen = {}, {}, 0
- compiledData = []
- for glyph in sorted(value, key=font.getGlyphID):
- subWriter = OTTableWriter()
- value[glyph].compile(subWriter, font)
- data = subWriter.getAllData()
- offset = offsetByData.get(data, None)
- if offset == None:
- offset = dataLen
- dataLen = dataLen + len(data)
- offsetByData[data] = offset
- compiledData.append(data)
- offsetByGlyph[glyph] = offset
- # For calculating the offsets to our AATLookup and data table,
- # we can use the regular OTTableWriter infrastructure.
- lookupWriter = writer.getSubWriter(offsetSize=4)
- lookup = AATLookup('DataOffsets', None, None, UShort)
- lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None)
-
- dataWriter = writer.getSubWriter(offsetSize=4)
- writer.writeSubTable(lookupWriter)
- writer.writeSubTable(dataWriter)
- for d in compiledData:
- dataWriter.writeData(d)
-
- def xmlRead(self, attrs, content, font):
- lookup = AATLookup('DataOffsets', None, None, self.tableClass)
- return lookup.xmlRead(attrs, content, font)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- lookup = AATLookup('DataOffsets', None, None, self.tableClass)
- lookup.xmlWrite(xmlWriter, font, value, name, attrs)
+ def read(self, reader, font, tableDict):
+ lookupOffset = reader.readULong()
+ dataOffset = reader.readULong()
+ lookupReader = reader.getSubReader(lookupOffset)
+ lookup = AATLookup("DataOffsets", None, None, UShort)
+ offsets = lookup.read(lookupReader, font, tableDict)
+ result = {}
+ for glyph, offset in offsets.items():
+ dataReader = reader.getSubReader(offset + dataOffset)
+ item = self.tableClass()
+ item.decompile(dataReader, font)
+ result[glyph] = item
+ return result
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ # We do not work with OTTableWriter sub-writers because
+ # the offsets in our AATLookup are relative to our data
+ # table, for which we need to provide an offset value itself.
+ # It might have been possible to somehow make a kludge for
+ # performing this indirect offset computation directly inside
+ # OTTableWriter. But this would have made the internal logic
+ # of OTTableWriter even more complex than it already is,
+ # so we decided to roll our own offset computation for the
+ # contents of the AATLookup and associated data table.
+ offsetByGlyph, offsetByData, dataLen = {}, {}, 0
+ compiledData = []
+ for glyph in sorted(value, key=font.getGlyphID):
+ subWriter = OTTableWriter()
+ value[glyph].compile(subWriter, font)
+ data = subWriter.getAllData()
+ offset = offsetByData.get(data, None)
+ if offset == None:
+ offset = dataLen
+ dataLen = dataLen + len(data)
+ offsetByData[data] = offset
+ compiledData.append(data)
+ offsetByGlyph[glyph] = offset
+ # For calculating the offsets to our AATLookup and data table,
+ # we can use the regular OTTableWriter infrastructure.
+ lookupWriter = writer.getSubWriter()
+ lookup = AATLookup("DataOffsets", None, None, UShort)
+ lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None)
+
+ dataWriter = writer.getSubWriter()
+ writer.writeSubTable(lookupWriter, offsetSize=4)
+ writer.writeSubTable(dataWriter, offsetSize=4)
+ for d in compiledData:
+ dataWriter.writeData(d)
+
+ def xmlRead(self, attrs, content, font):
+ lookup = AATLookup("DataOffsets", None, None, self.tableClass)
+ return lookup.xmlRead(attrs, content, font)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ lookup = AATLookup("DataOffsets", None, None, self.tableClass)
+ lookup.xmlWrite(xmlWriter, font, value, name, attrs)
class MorxSubtableConverter(BaseConverter):
- _PROCESSING_ORDERS = {
- # bits 30 and 28 of morx.CoverageFlags; see morx spec
- (False, False): "LayoutOrder",
- (True, False): "ReversedLayoutOrder",
- (False, True): "LogicalOrder",
- (True, True): "ReversedLogicalOrder",
- }
-
- _PROCESSING_ORDERS_REVERSED = {
- val: key for key, val in _PROCESSING_ORDERS.items()
- }
-
- def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
-
- def _setTextDirectionFromCoverageFlags(self, flags, subtable):
- if (flags & 0x20) != 0:
- subtable.TextDirection = "Any"
- elif (flags & 0x80) != 0:
- subtable.TextDirection = "Vertical"
- else:
- subtable.TextDirection = "Horizontal"
-
- def read(self, reader, font, tableDict):
- pos = reader.pos
- m = MorxSubtable()
- m.StructLength = reader.readULong()
- flags = reader.readUInt8()
- orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0)
- m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
- self._setTextDirectionFromCoverageFlags(flags, m)
- m.Reserved = reader.readUShort()
- m.Reserved |= (flags & 0xF) << 16
- m.MorphType = reader.readUInt8()
- m.SubFeatureFlags = reader.readULong()
- tableClass = lookupTypes["morx"].get(m.MorphType)
- if tableClass is None:
- assert False, ("unsupported 'morx' lookup type %s" %
- m.MorphType)
- # To decode AAT ligatures, we need to know the subtable size.
- # The easiest way to pass this along is to create a new reader
- # that works on just the subtable as its data.
- headerLength = reader.pos - pos
- data = reader.data[
- reader.pos
- : reader.pos + m.StructLength - headerLength]
- assert len(data) == m.StructLength - headerLength
- subReader = OTTableReader(data=data, tableTag=reader.tableTag)
- m.SubStruct = tableClass()
- m.SubStruct.decompile(subReader, font)
- reader.seek(pos + m.StructLength)
- return m
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- xmlWriter.comment("StructLength=%d" % value.StructLength)
- xmlWriter.newline()
- xmlWriter.simpletag("TextDirection", value=value.TextDirection)
- xmlWriter.newline()
- xmlWriter.simpletag("ProcessingOrder",
- value=value.ProcessingOrder)
- xmlWriter.newline()
- if value.Reserved != 0:
- xmlWriter.simpletag("Reserved",
- value="0x%04x" % value.Reserved)
- xmlWriter.newline()
- xmlWriter.comment("MorphType=%d" % value.MorphType)
- xmlWriter.newline()
- xmlWriter.simpletag("SubFeatureFlags",
- value="0x%08x" % value.SubFeatureFlags)
- xmlWriter.newline()
- value.SubStruct.toXML(xmlWriter, font)
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def xmlRead(self, attrs, content, font):
- m = MorxSubtable()
- covFlags = 0
- m.Reserved = 0
- for eltName, eltAttrs, eltContent in filter(istuple, content):
- if eltName == "CoverageFlags":
- # Only in XML from old versions of fonttools.
- covFlags = safeEval(eltAttrs["value"])
- orderKey = ((covFlags & 0x40) != 0,
- (covFlags & 0x10) != 0)
- m.ProcessingOrder = self._PROCESSING_ORDERS[
- orderKey]
- self._setTextDirectionFromCoverageFlags(
- covFlags, m)
- elif eltName == "ProcessingOrder":
- m.ProcessingOrder = eltAttrs["value"]
- assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, "unknown ProcessingOrder: %s" % m.ProcessingOrder
- elif eltName == "TextDirection":
- m.TextDirection = eltAttrs["value"]
- assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, "unknown TextDirection %s" % m.TextDirection
- elif eltName == "Reserved":
- m.Reserved = safeEval(eltAttrs["value"])
- elif eltName == "SubFeatureFlags":
- m.SubFeatureFlags = safeEval(eltAttrs["value"])
- elif eltName.endswith("Morph"):
- m.fromXML(eltName, eltAttrs, eltContent, font)
- else:
- assert False, eltName
- m.Reserved = (covFlags & 0xF) << 16 | m.Reserved
- return m
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- covFlags = (value.Reserved & 0x000F0000) >> 16
- reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[
- value.ProcessingOrder]
- covFlags |= 0x80 if value.TextDirection == "Vertical" else 0
- covFlags |= 0x40 if reverseOrder else 0
- covFlags |= 0x20 if value.TextDirection == "Any" else 0
- covFlags |= 0x10 if logicalOrder else 0
- value.CoverageFlags = covFlags
- lengthIndex = len(writer.items)
- before = writer.getDataLength()
- value.StructLength = 0xdeadbeef
- # The high nibble of value.Reserved is actuallly encoded
- # into coverageFlags, so we need to clear it here.
- origReserved = value.Reserved # including high nibble
- value.Reserved = value.Reserved & 0xFFFF # without high nibble
- value.compile(writer, font)
- value.Reserved = origReserved # restore original value
- assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"
- length = writer.getDataLength() - before
- writer.items[lengthIndex] = struct.pack(">L", length)
+ _PROCESSING_ORDERS = {
+ # bits 30 and 28 of morx.CoverageFlags; see morx spec
+ (False, False): "LayoutOrder",
+ (True, False): "ReversedLayoutOrder",
+ (False, True): "LogicalOrder",
+ (True, True): "ReversedLogicalOrder",
+ }
+
+ _PROCESSING_ORDERS_REVERSED = {val: key for key, val in _PROCESSING_ORDERS.items()}
+
+ def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+
+ def _setTextDirectionFromCoverageFlags(self, flags, subtable):
+ if (flags & 0x20) != 0:
+ subtable.TextDirection = "Any"
+ elif (flags & 0x80) != 0:
+ subtable.TextDirection = "Vertical"
+ else:
+ subtable.TextDirection = "Horizontal"
+
+ def read(self, reader, font, tableDict):
+ pos = reader.pos
+ m = MorxSubtable()
+ m.StructLength = reader.readULong()
+ flags = reader.readUInt8()
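+        # Coverage byte (top byte of the 32-bit coverage field): 0x80 = vertical
+        # text only, 0x40 = descending (reverse) order, 0x20 = both orientations,
+        # 0x10 = logical order; the low nibble is preserved in Reserved below.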
+ orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0)
+ m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
+ self._setTextDirectionFromCoverageFlags(flags, m)
+ m.Reserved = reader.readUShort()
+ m.Reserved |= (flags & 0xF) << 16
+ m.MorphType = reader.readUInt8()
+ m.SubFeatureFlags = reader.readULong()
+ tableClass = lookupTypes["morx"].get(m.MorphType)
+ if tableClass is None:
+ assert False, "unsupported 'morx' lookup type %s" % m.MorphType
+ # To decode AAT ligatures, we need to know the subtable size.
+ # The easiest way to pass this along is to create a new reader
+ # that works on just the subtable as its data.
+ headerLength = reader.pos - pos
+ data = reader.data[reader.pos : reader.pos + m.StructLength - headerLength]
+ assert len(data) == m.StructLength - headerLength
+ subReader = OTTableReader(data=data, tableTag=reader.tableTag)
+ m.SubStruct = tableClass()
+ m.SubStruct.decompile(subReader, font)
+ reader.seek(pos + m.StructLength)
+ return m
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ xmlWriter.comment("StructLength=%d" % value.StructLength)
+ xmlWriter.newline()
+ xmlWriter.simpletag("TextDirection", value=value.TextDirection)
+ xmlWriter.newline()
+ xmlWriter.simpletag("ProcessingOrder", value=value.ProcessingOrder)
+ xmlWriter.newline()
+ if value.Reserved != 0:
+ xmlWriter.simpletag("Reserved", value="0x%04x" % value.Reserved)
+ xmlWriter.newline()
+ xmlWriter.comment("MorphType=%d" % value.MorphType)
+ xmlWriter.newline()
+ xmlWriter.simpletag("SubFeatureFlags", value="0x%08x" % value.SubFeatureFlags)
+ xmlWriter.newline()
+ value.SubStruct.toXML(xmlWriter, font)
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ m = MorxSubtable()
+ covFlags = 0
+ m.Reserved = 0
+ for eltName, eltAttrs, eltContent in filter(istuple, content):
+ if eltName == "CoverageFlags":
+ # Only in XML from old versions of fonttools.
+ covFlags = safeEval(eltAttrs["value"])
+ orderKey = ((covFlags & 0x40) != 0, (covFlags & 0x10) != 0)
+ m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
+ self._setTextDirectionFromCoverageFlags(covFlags, m)
+ elif eltName == "ProcessingOrder":
+ m.ProcessingOrder = eltAttrs["value"]
+ assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, (
+ "unknown ProcessingOrder: %s" % m.ProcessingOrder
+ )
+ elif eltName == "TextDirection":
+ m.TextDirection = eltAttrs["value"]
+ assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, (
+ "unknown TextDirection %s" % m.TextDirection
+ )
+ elif eltName == "Reserved":
+ m.Reserved = safeEval(eltAttrs["value"])
+ elif eltName == "SubFeatureFlags":
+ m.SubFeatureFlags = safeEval(eltAttrs["value"])
+ elif eltName.endswith("Morph"):
+ m.fromXML(eltName, eltAttrs, eltContent, font)
+ else:
+ assert False, eltName
+ m.Reserved = (covFlags & 0xF) << 16 | m.Reserved
+ return m
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
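+        # Rebuild the coverage byte from TextDirection, ProcessingOrder and the
+        # nibble that read() stashed in the high bits of Reserved.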
+ covFlags = (value.Reserved & 0x000F0000) >> 16
+ reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[
+ value.ProcessingOrder
+ ]
+ covFlags |= 0x80 if value.TextDirection == "Vertical" else 0
+ covFlags |= 0x40 if reverseOrder else 0
+ covFlags |= 0x20 if value.TextDirection == "Any" else 0
+ covFlags |= 0x10 if logicalOrder else 0
+ value.CoverageFlags = covFlags
+ lengthIndex = len(writer.items)
+ before = writer.getDataLength()
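+        # Write a placeholder StructLength, compile the subtable, then patch the
+        # real length over the sentinel once the total size is known.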
+ value.StructLength = 0xDEADBEEF
+        # The high nibble of value.Reserved is actually encoded
+ # into coverageFlags, so we need to clear it here.
+ origReserved = value.Reserved # including high nibble
+ value.Reserved = value.Reserved & 0xFFFF # without high nibble
+ value.compile(writer, font)
+ value.Reserved = origReserved # restore original value
+ assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"
+ length = writer.getDataLength() - before
+ writer.items[lengthIndex] = struct.pack(">L", length)
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader
# TODO: Untangle the implementation of the various lookup-specific formats.
class STXHeader(BaseConverter):
- def __init__(self, name, repeat, aux, tableClass, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
- assert issubclass(self.tableClass, AATAction)
- self.classLookup = AATLookup("GlyphClasses", None, None, UShort)
- if issubclass(self.tableClass, ContextualMorphAction):
- self.perGlyphLookup = AATLookup("PerGlyphLookup",
- None, None, GlyphID)
- else:
- self.perGlyphLookup = None
-
- def read(self, reader, font, tableDict):
- table = AATStateTable()
- pos = reader.pos
- classTableReader = reader.getSubReader(0)
- stateArrayReader = reader.getSubReader(0)
- entryTableReader = reader.getSubReader(0)
- actionReader = None
- ligaturesReader = None
- table.GlyphClassCount = reader.readULong()
- classTableReader.seek(pos + reader.readULong())
- stateArrayReader.seek(pos + reader.readULong())
- entryTableReader.seek(pos + reader.readULong())
- if self.perGlyphLookup is not None:
- perGlyphTableReader = reader.getSubReader(0)
- perGlyphTableReader.seek(pos + reader.readULong())
- if issubclass(self.tableClass, LigatureMorphAction):
- actionReader = reader.getSubReader(0)
- actionReader.seek(pos + reader.readULong())
- ligComponentReader = reader.getSubReader(0)
- ligComponentReader.seek(pos + reader.readULong())
- ligaturesReader = reader.getSubReader(0)
- ligaturesReader.seek(pos + reader.readULong())
- numLigComponents = (ligaturesReader.pos
- - ligComponentReader.pos) // 2
- assert numLigComponents >= 0
- table.LigComponents = \
- ligComponentReader.readUShortArray(numLigComponents)
- table.Ligatures = self._readLigatures(ligaturesReader, font)
- elif issubclass(self.tableClass, InsertionMorphAction):
- actionReader = reader.getSubReader(0)
- actionReader.seek(pos + reader.readULong())
- table.GlyphClasses = self.classLookup.read(classTableReader,
- font, tableDict)
- numStates = int((entryTableReader.pos - stateArrayReader.pos)
- / (table.GlyphClassCount * 2))
- for stateIndex in range(numStates):
- state = AATState()
- table.States.append(state)
- for glyphClass in range(table.GlyphClassCount):
- entryIndex = stateArrayReader.readUShort()
- state.Transitions[glyphClass] = \
- self._readTransition(entryTableReader,
- entryIndex, font,
- actionReader)
- if self.perGlyphLookup is not None:
- table.PerGlyphLookups = self._readPerGlyphLookups(
- table, perGlyphTableReader, font)
- return table
-
- def _readTransition(self, reader, entryIndex, font, actionReader):
- transition = self.tableClass()
- entryReader = reader.getSubReader(
- reader.pos + entryIndex * transition.staticSize)
- transition.decompile(entryReader, font, actionReader)
- return transition
-
- def _readLigatures(self, reader, font):
- limit = len(reader.data)
- numLigatureGlyphs = (limit - reader.pos) // 2
- return font.getGlyphNameMany(reader.readUShortArray(numLigatureGlyphs))
-
- def _countPerGlyphLookups(self, table):
- # Somewhat annoyingly, the morx table does not encode
- # the size of the per-glyph table. So we need to find
- # the maximum value that MorphActions use as index
- # into this table.
- numLookups = 0
- for state in table.States:
- for t in state.Transitions.values():
- if isinstance(t, ContextualMorphAction):
- if t.MarkIndex != 0xFFFF:
- numLookups = max(
- numLookups,
- t.MarkIndex + 1)
- if t.CurrentIndex != 0xFFFF:
- numLookups = max(
- numLookups,
- t.CurrentIndex + 1)
- return numLookups
-
- def _readPerGlyphLookups(self, table, reader, font):
- pos = reader.pos
- lookups = []
- for _ in range(self._countPerGlyphLookups(table)):
- lookupReader = reader.getSubReader(0)
- lookupReader.seek(pos + reader.readULong())
- lookups.append(
- self.perGlyphLookup.read(lookupReader, font, {}))
- return lookups
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- glyphClassWriter = OTTableWriter()
- self.classLookup.write(glyphClassWriter, font, tableDict,
- value.GlyphClasses, repeatIndex=None)
- glyphClassData = pad(glyphClassWriter.getAllData(), 2)
- glyphClassCount = max(value.GlyphClasses.values()) + 1
- glyphClassTableOffset = 16 # size of STXHeader
- if self.perGlyphLookup is not None:
- glyphClassTableOffset += 4
-
- glyphClassTableOffset += self.tableClass.actionHeaderSize
- actionData, actionIndex = \
- self.tableClass.compileActions(font, value.States)
- stateArrayData, entryTableData = self._compileStates(
- font, value.States, glyphClassCount, actionIndex)
- stateArrayOffset = glyphClassTableOffset + len(glyphClassData)
- entryTableOffset = stateArrayOffset + len(stateArrayData)
- perGlyphOffset = entryTableOffset + len(entryTableData)
- perGlyphData = \
- pad(self._compilePerGlyphLookups(value, font), 4)
- if actionData is not None:
- actionOffset = entryTableOffset + len(entryTableData)
- else:
- actionOffset = None
-
- ligaturesOffset, ligComponentsOffset = None, None
- ligComponentsData = self._compileLigComponents(value, font)
- ligaturesData = self._compileLigatures(value, font)
- if ligComponentsData is not None:
- assert len(perGlyphData) == 0
- ligComponentsOffset = actionOffset + len(actionData)
- ligaturesOffset = ligComponentsOffset + len(ligComponentsData)
-
- writer.writeULong(glyphClassCount)
- writer.writeULong(glyphClassTableOffset)
- writer.writeULong(stateArrayOffset)
- writer.writeULong(entryTableOffset)
- if self.perGlyphLookup is not None:
- writer.writeULong(perGlyphOffset)
- if actionOffset is not None:
- writer.writeULong(actionOffset)
- if ligComponentsOffset is not None:
- writer.writeULong(ligComponentsOffset)
- writer.writeULong(ligaturesOffset)
- writer.writeData(glyphClassData)
- writer.writeData(stateArrayData)
- writer.writeData(entryTableData)
- writer.writeData(perGlyphData)
- if actionData is not None:
- writer.writeData(actionData)
- if ligComponentsData is not None:
- writer.writeData(ligComponentsData)
- if ligaturesData is not None:
- writer.writeData(ligaturesData)
-
- def _compileStates(self, font, states, glyphClassCount, actionIndex):
- stateArrayWriter = OTTableWriter()
- entries, entryIDs = [], {}
- for state in states:
- for glyphClass in range(glyphClassCount):
- transition = state.Transitions[glyphClass]
- entryWriter = OTTableWriter()
- transition.compile(entryWriter, font,
- actionIndex)
- entryData = entryWriter.getAllData()
- assert len(entryData) == transition.staticSize, ( \
- "%s has staticSize %d, "
- "but actually wrote %d bytes" % (
- repr(transition),
- transition.staticSize,
- len(entryData)))
- entryIndex = entryIDs.get(entryData)
- if entryIndex is None:
- entryIndex = len(entries)
- entryIDs[entryData] = entryIndex
- entries.append(entryData)
- stateArrayWriter.writeUShort(entryIndex)
- stateArrayData = pad(stateArrayWriter.getAllData(), 4)
- entryTableData = pad(bytesjoin(entries), 4)
- return stateArrayData, entryTableData
-
- def _compilePerGlyphLookups(self, table, font):
- if self.perGlyphLookup is None:
- return b""
- numLookups = self._countPerGlyphLookups(table)
- assert len(table.PerGlyphLookups) == numLookups, (
- "len(AATStateTable.PerGlyphLookups) is %d, "
- "but the actions inside the table refer to %d" %
- (len(table.PerGlyphLookups), numLookups))
- writer = OTTableWriter()
- for lookup in table.PerGlyphLookups:
- lookupWriter = writer.getSubWriter(offsetSize=4)
- self.perGlyphLookup.write(lookupWriter, font,
- {}, lookup, None)
- writer.writeSubTable(lookupWriter)
- return writer.getAllData()
-
- def _compileLigComponents(self, table, font):
- if not hasattr(table, "LigComponents"):
- return None
- writer = OTTableWriter()
- for component in table.LigComponents:
- writer.writeUShort(component)
- return writer.getAllData()
-
- def _compileLigatures(self, table, font):
- if not hasattr(table, "Ligatures"):
- return None
- writer = OTTableWriter()
- for glyphName in table.Ligatures:
- writer.writeUShort(font.getGlyphID(glyphName))
- return writer.getAllData()
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- xmlWriter.comment("GlyphClassCount=%s" %value.GlyphClassCount)
- xmlWriter.newline()
- for g, klass in sorted(value.GlyphClasses.items()):
- xmlWriter.simpletag("GlyphClass", glyph=g, value=klass)
- xmlWriter.newline()
- for stateIndex, state in enumerate(value.States):
- xmlWriter.begintag("State", index=stateIndex)
- xmlWriter.newline()
- for glyphClass, trans in sorted(state.Transitions.items()):
- trans.toXML(xmlWriter, font=font,
- attrs={"onGlyphClass": glyphClass},
- name="Transition")
- xmlWriter.endtag("State")
- xmlWriter.newline()
- for i, lookup in enumerate(value.PerGlyphLookups):
- xmlWriter.begintag("PerGlyphLookup", index=i)
- xmlWriter.newline()
- for glyph, val in sorted(lookup.items()):
- xmlWriter.simpletag("Lookup", glyph=glyph,
- value=val)
- xmlWriter.newline()
- xmlWriter.endtag("PerGlyphLookup")
- xmlWriter.newline()
- if hasattr(value, "LigComponents"):
- xmlWriter.begintag("LigComponents")
- xmlWriter.newline()
- for i, val in enumerate(getattr(value, "LigComponents")):
- xmlWriter.simpletag("LigComponent", index=i,
- value=val)
- xmlWriter.newline()
- xmlWriter.endtag("LigComponents")
- xmlWriter.newline()
- self._xmlWriteLigatures(xmlWriter, font, value, name, attrs)
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs):
- if not hasattr(value, "Ligatures"):
- return
- xmlWriter.begintag("Ligatures")
- xmlWriter.newline()
- for i, g in enumerate(getattr(value, "Ligatures")):
- xmlWriter.simpletag("Ligature", index=i, glyph=g)
- xmlWriter.newline()
- xmlWriter.endtag("Ligatures")
- xmlWriter.newline()
-
- def xmlRead(self, attrs, content, font):
- table = AATStateTable()
- for eltName, eltAttrs, eltContent in filter(istuple, content):
- if eltName == "GlyphClass":
- glyph = eltAttrs["glyph"]
- value = eltAttrs["value"]
- table.GlyphClasses[glyph] = safeEval(value)
- elif eltName == "State":
- state = self._xmlReadState(eltAttrs, eltContent, font)
- table.States.append(state)
- elif eltName == "PerGlyphLookup":
- lookup = self.perGlyphLookup.xmlRead(
- eltAttrs, eltContent, font)
- table.PerGlyphLookups.append(lookup)
- elif eltName == "LigComponents":
- table.LigComponents = \
- self._xmlReadLigComponents(
- eltAttrs, eltContent, font)
- elif eltName == "Ligatures":
- table.Ligatures = \
- self._xmlReadLigatures(
- eltAttrs, eltContent, font)
- table.GlyphClassCount = max(table.GlyphClasses.values()) + 1
- return table
-
- def _xmlReadState(self, attrs, content, font):
- state = AATState()
- for eltName, eltAttrs, eltContent in filter(istuple, content):
- if eltName == "Transition":
- glyphClass = safeEval(eltAttrs["onGlyphClass"])
- transition = self.tableClass()
- transition.fromXML(eltName, eltAttrs,
- eltContent, font)
- state.Transitions[glyphClass] = transition
- return state
-
- def _xmlReadLigComponents(self, attrs, content, font):
- ligComponents = []
- for eltName, eltAttrs, _eltContent in filter(istuple, content):
- if eltName == "LigComponent":
- ligComponents.append(
- safeEval(eltAttrs["value"]))
- return ligComponents
-
- def _xmlReadLigatures(self, attrs, content, font):
- ligs = []
- for eltName, eltAttrs, _eltContent in filter(istuple, content):
- if eltName == "Ligature":
- ligs.append(eltAttrs["glyph"])
- return ligs
+ def __init__(self, name, repeat, aux, tableClass, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+ assert issubclass(self.tableClass, AATAction)
+ self.classLookup = AATLookup("GlyphClasses", None, None, UShort)
+ if issubclass(self.tableClass, ContextualMorphAction):
+ self.perGlyphLookup = AATLookup("PerGlyphLookup", None, None, GlyphID)
+ else:
+ self.perGlyphLookup = None
+
+ def read(self, reader, font, tableDict):
+ table = AATStateTable()
+ pos = reader.pos
+ classTableReader = reader.getSubReader(0)
+ stateArrayReader = reader.getSubReader(0)
+ entryTableReader = reader.getSubReader(0)
+ actionReader = None
+ ligaturesReader = None
+ table.GlyphClassCount = reader.readULong()
+ classTableReader.seek(pos + reader.readULong())
+ stateArrayReader.seek(pos + reader.readULong())
+ entryTableReader.seek(pos + reader.readULong())
+ if self.perGlyphLookup is not None:
+ perGlyphTableReader = reader.getSubReader(0)
+ perGlyphTableReader.seek(pos + reader.readULong())
+ if issubclass(self.tableClass, LigatureMorphAction):
+ actionReader = reader.getSubReader(0)
+ actionReader.seek(pos + reader.readULong())
+ ligComponentReader = reader.getSubReader(0)
+ ligComponentReader.seek(pos + reader.readULong())
+ ligaturesReader = reader.getSubReader(0)
+ ligaturesReader.seek(pos + reader.readULong())
+ numLigComponents = (ligaturesReader.pos - ligComponentReader.pos) // 2
+ assert numLigComponents >= 0
+ table.LigComponents = ligComponentReader.readUShortArray(numLigComponents)
+ table.Ligatures = self._readLigatures(ligaturesReader, font)
+ elif issubclass(self.tableClass, InsertionMorphAction):
+ actionReader = reader.getSubReader(0)
+ actionReader.seek(pos + reader.readULong())
+ table.GlyphClasses = self.classLookup.read(classTableReader, font, tableDict)
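+        # The state count is not stored explicitly; infer it from the distance
+        # between the state array and the entry table (one uint16 per glyph class
+        # per state).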
+ numStates = int(
+ (entryTableReader.pos - stateArrayReader.pos) / (table.GlyphClassCount * 2)
+ )
+ for stateIndex in range(numStates):
+ state = AATState()
+ table.States.append(state)
+ for glyphClass in range(table.GlyphClassCount):
+ entryIndex = stateArrayReader.readUShort()
+ state.Transitions[glyphClass] = self._readTransition(
+ entryTableReader, entryIndex, font, actionReader
+ )
+ if self.perGlyphLookup is not None:
+ table.PerGlyphLookups = self._readPerGlyphLookups(
+ table, perGlyphTableReader, font
+ )
+ return table
+
+ def _readTransition(self, reader, entryIndex, font, actionReader):
+ transition = self.tableClass()
+ entryReader = reader.getSubReader(
+ reader.pos + entryIndex * transition.staticSize
+ )
+ transition.decompile(entryReader, font, actionReader)
+ return transition
+
+ def _readLigatures(self, reader, font):
+ limit = len(reader.data)
+ numLigatureGlyphs = (limit - reader.pos) // 2
+ return font.getGlyphNameMany(reader.readUShortArray(numLigatureGlyphs))
+
+ def _countPerGlyphLookups(self, table):
+ # Somewhat annoyingly, the morx table does not encode
+ # the size of the per-glyph table. So we need to find
+ # the maximum value that MorphActions use as index
+ # into this table.
+ numLookups = 0
+ for state in table.States:
+ for t in state.Transitions.values():
+ if isinstance(t, ContextualMorphAction):
+ if t.MarkIndex != 0xFFFF:
+ numLookups = max(numLookups, t.MarkIndex + 1)
+ if t.CurrentIndex != 0xFFFF:
+ numLookups = max(numLookups, t.CurrentIndex + 1)
+ return numLookups
+
+ def _readPerGlyphLookups(self, table, reader, font):
+ pos = reader.pos
+ lookups = []
+ for _ in range(self._countPerGlyphLookups(table)):
+ lookupReader = reader.getSubReader(0)
+ lookupReader.seek(pos + reader.readULong())
+ lookups.append(self.perGlyphLookup.read(lookupReader, font, {}))
+ return lookups
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ glyphClassWriter = OTTableWriter()
+ self.classLookup.write(
+ glyphClassWriter, font, tableDict, value.GlyphClasses, repeatIndex=None
+ )
+ glyphClassData = pad(glyphClassWriter.getAllData(), 2)
+ glyphClassCount = max(value.GlyphClasses.values()) + 1
+ glyphClassTableOffset = 16 # size of STXHeader
+ if self.perGlyphLookup is not None:
+ glyphClassTableOffset += 4
+
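+        # Some action types append extra offsets to the fixed header (for example
+        # the ligature action, component and ligature list offsets); account for
+        # them via the action class's actionHeaderSize.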
+ glyphClassTableOffset += self.tableClass.actionHeaderSize
+ actionData, actionIndex = self.tableClass.compileActions(font, value.States)
+ stateArrayData, entryTableData = self._compileStates(
+ font, value.States, glyphClassCount, actionIndex
+ )
+ stateArrayOffset = glyphClassTableOffset + len(glyphClassData)
+ entryTableOffset = stateArrayOffset + len(stateArrayData)
+ perGlyphOffset = entryTableOffset + len(entryTableData)
+ perGlyphData = pad(self._compilePerGlyphLookups(value, font), 4)
+ if actionData is not None:
+ actionOffset = entryTableOffset + len(entryTableData)
+ else:
+ actionOffset = None
+
+ ligaturesOffset, ligComponentsOffset = None, None
+ ligComponentsData = self._compileLigComponents(value, font)
+ ligaturesData = self._compileLigatures(value, font)
+ if ligComponentsData is not None:
+ assert len(perGlyphData) == 0
+ ligComponentsOffset = actionOffset + len(actionData)
+ ligaturesOffset = ligComponentsOffset + len(ligComponentsData)
+
+ writer.writeULong(glyphClassCount)
+ writer.writeULong(glyphClassTableOffset)
+ writer.writeULong(stateArrayOffset)
+ writer.writeULong(entryTableOffset)
+ if self.perGlyphLookup is not None:
+ writer.writeULong(perGlyphOffset)
+ if actionOffset is not None:
+ writer.writeULong(actionOffset)
+ if ligComponentsOffset is not None:
+ writer.writeULong(ligComponentsOffset)
+ writer.writeULong(ligaturesOffset)
+ writer.writeData(glyphClassData)
+ writer.writeData(stateArrayData)
+ writer.writeData(entryTableData)
+ writer.writeData(perGlyphData)
+ if actionData is not None:
+ writer.writeData(actionData)
+ if ligComponentsData is not None:
+ writer.writeData(ligComponentsData)
+ if ligaturesData is not None:
+ writer.writeData(ligaturesData)
+
+ def _compileStates(self, font, states, glyphClassCount, actionIndex):
+ stateArrayWriter = OTTableWriter()
+ entries, entryIDs = [], {}
+ for state in states:
+ for glyphClass in range(glyphClassCount):
+ transition = state.Transitions[glyphClass]
+ entryWriter = OTTableWriter()
+ transition.compile(entryWriter, font, actionIndex)
+ entryData = entryWriter.getAllData()
+ assert (
+ len(entryData) == transition.staticSize
+ ), "%s has staticSize %d, " "but actually wrote %d bytes" % (
+ repr(transition),
+ transition.staticSize,
+ len(entryData),
+ )
+ entryIndex = entryIDs.get(entryData)
+ if entryIndex is None:
+ entryIndex = len(entries)
+ entryIDs[entryData] = entryIndex
+ entries.append(entryData)
+ stateArrayWriter.writeUShort(entryIndex)
+ stateArrayData = pad(stateArrayWriter.getAllData(), 4)
+ entryTableData = pad(bytesjoin(entries), 4)
+ return stateArrayData, entryTableData
+
+ def _compilePerGlyphLookups(self, table, font):
+ if self.perGlyphLookup is None:
+ return b""
+ numLookups = self._countPerGlyphLookups(table)
+ assert len(table.PerGlyphLookups) == numLookups, (
+ "len(AATStateTable.PerGlyphLookups) is %d, "
+ "but the actions inside the table refer to %d"
+ % (len(table.PerGlyphLookups), numLookups)
+ )
+ writer = OTTableWriter()
+ for lookup in table.PerGlyphLookups:
+ lookupWriter = writer.getSubWriter()
+ self.perGlyphLookup.write(lookupWriter, font, {}, lookup, None)
+            writer.writeSubTable(lookupWriter, offsetSize=4)
+ return writer.getAllData()
+
+ def _compileLigComponents(self, table, font):
+ if not hasattr(table, "LigComponents"):
+ return None
+ writer = OTTableWriter()
+ for component in table.LigComponents:
+ writer.writeUShort(component)
+ return writer.getAllData()
+
+ def _compileLigatures(self, table, font):
+ if not hasattr(table, "Ligatures"):
+ return None
+ writer = OTTableWriter()
+ for glyphName in table.Ligatures:
+ writer.writeUShort(font.getGlyphID(glyphName))
+ return writer.getAllData()
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ xmlWriter.comment("GlyphClassCount=%s" % value.GlyphClassCount)
+ xmlWriter.newline()
+ for g, klass in sorted(value.GlyphClasses.items()):
+ xmlWriter.simpletag("GlyphClass", glyph=g, value=klass)
+ xmlWriter.newline()
+ for stateIndex, state in enumerate(value.States):
+ xmlWriter.begintag("State", index=stateIndex)
+ xmlWriter.newline()
+ for glyphClass, trans in sorted(state.Transitions.items()):
+ trans.toXML(
+ xmlWriter,
+ font=font,
+ attrs={"onGlyphClass": glyphClass},
+ name="Transition",
+ )
+ xmlWriter.endtag("State")
+ xmlWriter.newline()
+ for i, lookup in enumerate(value.PerGlyphLookups):
+ xmlWriter.begintag("PerGlyphLookup", index=i)
+ xmlWriter.newline()
+ for glyph, val in sorted(lookup.items()):
+ xmlWriter.simpletag("Lookup", glyph=glyph, value=val)
+ xmlWriter.newline()
+ xmlWriter.endtag("PerGlyphLookup")
+ xmlWriter.newline()
+ if hasattr(value, "LigComponents"):
+ xmlWriter.begintag("LigComponents")
+ xmlWriter.newline()
+ for i, val in enumerate(getattr(value, "LigComponents")):
+ xmlWriter.simpletag("LigComponent", index=i, value=val)
+ xmlWriter.newline()
+ xmlWriter.endtag("LigComponents")
+ xmlWriter.newline()
+ self._xmlWriteLigatures(xmlWriter, font, value, name, attrs)
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs):
+ if not hasattr(value, "Ligatures"):
+ return
+ xmlWriter.begintag("Ligatures")
+ xmlWriter.newline()
+ for i, g in enumerate(getattr(value, "Ligatures")):
+ xmlWriter.simpletag("Ligature", index=i, glyph=g)
+ xmlWriter.newline()
+ xmlWriter.endtag("Ligatures")
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ table = AATStateTable()
+ for eltName, eltAttrs, eltContent in filter(istuple, content):
+ if eltName == "GlyphClass":
+ glyph = eltAttrs["glyph"]
+ value = eltAttrs["value"]
+ table.GlyphClasses[glyph] = safeEval(value)
+ elif eltName == "State":
+ state = self._xmlReadState(eltAttrs, eltContent, font)
+ table.States.append(state)
+ elif eltName == "PerGlyphLookup":
+ lookup = self.perGlyphLookup.xmlRead(eltAttrs, eltContent, font)
+ table.PerGlyphLookups.append(lookup)
+ elif eltName == "LigComponents":
+ table.LigComponents = self._xmlReadLigComponents(
+ eltAttrs, eltContent, font
+ )
+ elif eltName == "Ligatures":
+ table.Ligatures = self._xmlReadLigatures(eltAttrs, eltContent, font)
+ table.GlyphClassCount = max(table.GlyphClasses.values()) + 1
+ return table
+
+ def _xmlReadState(self, attrs, content, font):
+ state = AATState()
+ for eltName, eltAttrs, eltContent in filter(istuple, content):
+ if eltName == "Transition":
+ glyphClass = safeEval(eltAttrs["onGlyphClass"])
+ transition = self.tableClass()
+ transition.fromXML(eltName, eltAttrs, eltContent, font)
+ state.Transitions[glyphClass] = transition
+ return state
+
+ def _xmlReadLigComponents(self, attrs, content, font):
+ ligComponents = []
+ for eltName, eltAttrs, _eltContent in filter(istuple, content):
+ if eltName == "LigComponent":
+ ligComponents.append(safeEval(eltAttrs["value"]))
+ return ligComponents
+
+ def _xmlReadLigatures(self, attrs, content, font):
+ ligs = []
+ for eltName, eltAttrs, _eltContent in filter(istuple, content):
+ if eltName == "Ligature":
+ ligs.append(eltAttrs["glyph"])
+ return ligs
class CIDGlyphMap(BaseConverter):
- def read(self, reader, font, tableDict):
- numCIDs = reader.readUShort()
- result = {}
- for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)):
- if glyphID != 0xFFFF:
- result[cid] = font.getGlyphName(glyphID)
- return result
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- items = {cid: font.getGlyphID(glyph)
- for cid, glyph in value.items()}
- count = max(items) + 1 if items else 0
- writer.writeUShort(count)
- for cid in range(count):
- writer.writeUShort(items.get(cid, 0xFFFF))
-
- def xmlRead(self, attrs, content, font):
- result = {}
- for eName, eAttrs, _eContent in filter(istuple, content):
- if eName == "CID":
- result[safeEval(eAttrs["cid"])] = \
- eAttrs["glyph"].strip()
- return result
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- for cid, glyph in sorted(value.items()):
- if glyph is not None and glyph != 0xFFFF:
- xmlWriter.simpletag(
- "CID", cid=cid, glyph=glyph)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ def read(self, reader, font, tableDict):
+ numCIDs = reader.readUShort()
+ result = {}
+ for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)):
+ if glyphID != 0xFFFF:
+ result[cid] = font.getGlyphName(glyphID)
+ return result
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ items = {cid: font.getGlyphID(glyph) for cid, glyph in value.items()}
+ count = max(items) + 1 if items else 0
+ writer.writeUShort(count)
+ for cid in range(count):
+ writer.writeUShort(items.get(cid, 0xFFFF))
+
+ def xmlRead(self, attrs, content, font):
+ result = {}
+ for eName, eAttrs, _eContent in filter(istuple, content):
+ if eName == "CID":
+ result[safeEval(eAttrs["cid"])] = eAttrs["glyph"].strip()
+ return result
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ for cid, glyph in sorted(value.items()):
+ if glyph is not None and glyph != 0xFFFF:
+ xmlWriter.simpletag("CID", cid=cid, glyph=glyph)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
class GlyphCIDMap(BaseConverter):
- def read(self, reader, font, tableDict):
- glyphOrder = font.getGlyphOrder()
- count = reader.readUShort()
- cids = reader.readUShortArray(count)
- if count > len(glyphOrder):
- log.warning("GlyphCIDMap has %d elements, "
- "but the font has only %d glyphs; "
- "ignoring the rest" %
- (count, len(glyphOrder)))
- result = {}
- for glyphID in range(min(len(cids), len(glyphOrder))):
- cid = cids[glyphID]
- if cid != 0xFFFF:
- result[glyphOrder[glyphID]] = cid
- return result
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- items = {font.getGlyphID(g): cid
- for g, cid in value.items()
- if cid is not None and cid != 0xFFFF}
- count = max(items) + 1 if items else 0
- writer.writeUShort(count)
- for glyphID in range(count):
- writer.writeUShort(items.get(glyphID, 0xFFFF))
-
- def xmlRead(self, attrs, content, font):
- result = {}
- for eName, eAttrs, _eContent in filter(istuple, content):
- if eName == "CID":
- result[eAttrs["glyph"]] = \
- safeEval(eAttrs["value"])
- return result
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- for glyph, cid in sorted(value.items()):
- if cid is not None and cid != 0xFFFF:
- xmlWriter.simpletag(
- "CID", glyph=glyph, value=cid)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ def read(self, reader, font, tableDict):
+ glyphOrder = font.getGlyphOrder()
+ count = reader.readUShort()
+ cids = reader.readUShortArray(count)
+ if count > len(glyphOrder):
+ log.warning(
+ "GlyphCIDMap has %d elements, "
+ "but the font has only %d glyphs; "
+ "ignoring the rest" % (count, len(glyphOrder))
+ )
+ result = {}
+ for glyphID in range(min(len(cids), len(glyphOrder))):
+ cid = cids[glyphID]
+ if cid != 0xFFFF:
+ result[glyphOrder[glyphID]] = cid
+ return result
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ items = {
+ font.getGlyphID(g): cid
+ for g, cid in value.items()
+ if cid is not None and cid != 0xFFFF
+ }
+ count = max(items) + 1 if items else 0
+ writer.writeUShort(count)
+ for glyphID in range(count):
+ writer.writeUShort(items.get(glyphID, 0xFFFF))
+
+ def xmlRead(self, attrs, content, font):
+ result = {}
+ for eName, eAttrs, _eContent in filter(istuple, content):
+ if eName == "CID":
+ result[eAttrs["glyph"]] = safeEval(eAttrs["value"])
+ return result
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ for glyph, cid in sorted(value.items()):
+ if cid is not None and cid != 0xFFFF:
+ xmlWriter.simpletag("CID", glyph=glyph, value=cid)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
class DeltaValue(BaseConverter):
-
- def read(self, reader, font, tableDict):
- StartSize = tableDict["StartSize"]
- EndSize = tableDict["EndSize"]
- DeltaFormat = tableDict["DeltaFormat"]
- assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
- nItems = EndSize - StartSize + 1
- nBits = 1 << DeltaFormat
- minusOffset = 1 << nBits
- mask = (1 << nBits) - 1
- signMask = 1 << (nBits - 1)
-
- DeltaValue = []
- tmp, shift = 0, 0
- for i in range(nItems):
- if shift == 0:
- tmp, shift = reader.readUShort(), 16
- shift = shift - nBits
- value = (tmp >> shift) & mask
- if value & signMask:
- value = value - minusOffset
- DeltaValue.append(value)
- return DeltaValue
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- StartSize = tableDict["StartSize"]
- EndSize = tableDict["EndSize"]
- DeltaFormat = tableDict["DeltaFormat"]
- DeltaValue = value
- assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
- nItems = EndSize - StartSize + 1
- nBits = 1 << DeltaFormat
- assert len(DeltaValue) == nItems
- mask = (1 << nBits) - 1
-
- tmp, shift = 0, 16
- for value in DeltaValue:
- shift = shift - nBits
- tmp = tmp | ((value & mask) << shift)
- if shift == 0:
- writer.writeUShort(tmp)
- tmp, shift = 0, 16
- if shift != 16:
- writer.writeUShort(tmp)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- xmlWriter.newline()
-
- def xmlRead(self, attrs, content, font):
- return safeEval(attrs["value"])
+ def read(self, reader, font, tableDict):
+ StartSize = tableDict["StartSize"]
+ EndSize = tableDict["EndSize"]
+ DeltaFormat = tableDict["DeltaFormat"]
+ assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
+ nItems = EndSize - StartSize + 1
+ nBits = 1 << DeltaFormat
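+        # DeltaFormat 1, 2 and 3 pack signed deltas into 2-, 4- and 8-bit fields,
+        # filled from the most significant bits of successive 16-bit words.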
+ minusOffset = 1 << nBits
+ mask = (1 << nBits) - 1
+ signMask = 1 << (nBits - 1)
+
+ DeltaValue = []
+ tmp, shift = 0, 0
+ for i in range(nItems):
+ if shift == 0:
+ tmp, shift = reader.readUShort(), 16
+ shift = shift - nBits
+ value = (tmp >> shift) & mask
+ if value & signMask:
+ value = value - minusOffset
+ DeltaValue.append(value)
+ return DeltaValue
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ StartSize = tableDict["StartSize"]
+ EndSize = tableDict["EndSize"]
+ DeltaFormat = tableDict["DeltaFormat"]
+ DeltaValue = value
+ assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
+ nItems = EndSize - StartSize + 1
+ nBits = 1 << DeltaFormat
+ assert len(DeltaValue) == nItems
+ mask = (1 << nBits) - 1
+
+ tmp, shift = 0, 16
+ for value in DeltaValue:
+ shift = shift - nBits
+ tmp = tmp | ((value & mask) << shift)
+ if shift == 0:
+ writer.writeUShort(tmp)
+ tmp, shift = 0, 16
+ if shift != 16:
+ writer.writeUShort(tmp)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ return safeEval(attrs["value"])
class VarIdxMapValue(BaseConverter):
-
- def read(self, reader, font, tableDict):
- fmt = tableDict['EntryFormat']
- nItems = tableDict['MappingCount']
-
- innerBits = 1 + (fmt & 0x000F)
- innerMask = (1<<innerBits) - 1
- outerMask = 0xFFFFFFFF - innerMask
- outerShift = 16 - innerBits
-
- entrySize = 1 + ((fmt & 0x0030) >> 4)
- readArray = {
- 1: reader.readUInt8Array,
- 2: reader.readUShortArray,
- 3: reader.readUInt24Array,
- 4: reader.readULongArray,
- }[entrySize]
-
- return [(((raw & outerMask) << outerShift) | (raw & innerMask))
- for raw in readArray(nItems)]
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- fmt = tableDict['EntryFormat']
- mapping = value
- writer['MappingCount'].setValue(len(mapping))
-
- innerBits = 1 + (fmt & 0x000F)
- innerMask = (1<<innerBits) - 1
- outerShift = 16 - innerBits
-
- entrySize = 1 + ((fmt & 0x0030) >> 4)
- writeArray = {
- 1: writer.writeUInt8Array,
- 2: writer.writeUShortArray,
- 3: writer.writeUInt24Array,
- 4: writer.writeULongArray,
- }[entrySize]
-
- writeArray([(((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask))
- for idx in mapping])
+ def read(self, reader, font, tableDict):
+ fmt = tableDict["EntryFormat"]
+ nItems = tableDict["MappingCount"]
+
+ innerBits = 1 + (fmt & 0x000F)
+ innerMask = (1 << innerBits) - 1
+ outerMask = 0xFFFFFFFF - innerMask
+ outerShift = 16 - innerBits
+
+ entrySize = 1 + ((fmt & 0x0030) >> 4)
+ readArray = {
+ 1: reader.readUInt8Array,
+ 2: reader.readUShortArray,
+ 3: reader.readUInt24Array,
+ 4: reader.readULongArray,
+ }[entrySize]
+
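+        # Expand each packed entry into a 32-bit VarIdx with the outer (VarData)
+        # index in the high 16 bits and the inner (row) index in the low bits.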
+ return [
+ (((raw & outerMask) << outerShift) | (raw & innerMask))
+ for raw in readArray(nItems)
+ ]
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ fmt = tableDict["EntryFormat"]
+ mapping = value
+ writer["MappingCount"].setValue(len(mapping))
+
+ innerBits = 1 + (fmt & 0x000F)
+ innerMask = (1 << innerBits) - 1
+ outerShift = 16 - innerBits
+
+ entrySize = 1 + ((fmt & 0x0030) >> 4)
+ writeArray = {
+ 1: writer.writeUInt8Array,
+ 2: writer.writeUShortArray,
+ 3: writer.writeUInt24Array,
+ 4: writer.writeULongArray,
+ }[entrySize]
+
+ writeArray(
+ [
+ (((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask))
+ for idx in mapping
+ ]
+ )
class VarDataValue(BaseConverter):
+ def read(self, reader, font, tableDict):
+ values = []
- def read(self, reader, font, tableDict):
- values = []
+ regionCount = tableDict["VarRegionCount"]
+ wordCount = tableDict["NumShorts"]
- regionCount = tableDict["VarRegionCount"]
- wordCount = tableDict["NumShorts"]
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = bool(wordCount & 0x8000)
+ wordCount = wordCount & 0x7FFF
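+        # When the high bit of NumShorts is set, "word" deltas are 32-bit and the
+        # remaining deltas are 16-bit; otherwise words are 16-bit and the rest 8-bit.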
- # https://github.com/fonttools/fonttools/issues/2279
- longWords = bool(wordCount & 0x8000)
- wordCount = wordCount & 0x7FFF
+ if longWords:
+ readBigArray, readSmallArray = reader.readLongArray, reader.readShortArray
+ else:
+ readBigArray, readSmallArray = reader.readShortArray, reader.readInt8Array
- if longWords:
- readBigArray, readSmallArray = reader.readLongArray, reader.readShortArray
- else:
- readBigArray, readSmallArray = reader.readShortArray, reader.readInt8Array
+ n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
+ values.extend(readBigArray(n1))
+ values.extend(readSmallArray(n2 - n1))
+ if n2 > regionCount: # Padding
+ del values[regionCount:]
- n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
- values.extend(readBigArray(n1))
- values.extend(readSmallArray(n2 - n1))
- if n2 > regionCount: # Padding
- del values[regionCount:]
+ return values
- return values
+ def write(self, writer, font, tableDict, values, repeatIndex=None):
+ regionCount = tableDict["VarRegionCount"]
+ wordCount = tableDict["NumShorts"]
- def write(self, writer, font, tableDict, values, repeatIndex=None):
- regionCount = tableDict["VarRegionCount"]
- wordCount = tableDict["NumShorts"]
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = bool(wordCount & 0x8000)
+ wordCount = wordCount & 0x7FFF
- # https://github.com/fonttools/fonttools/issues/2279
- longWords = bool(wordCount & 0x8000)
- wordCount = wordCount & 0x7FFF
+ (writeBigArray, writeSmallArray) = {
+ False: (writer.writeShortArray, writer.writeInt8Array),
+ True: (writer.writeLongArray, writer.writeShortArray),
+ }[longWords]
- (writeBigArray, writeSmallArray) = {
- False: (writer.writeShortArray, writer.writeInt8Array),
- True: (writer.writeLongArray, writer.writeShortArray),
- }[longWords]
+ n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
+ writeBigArray(values[:n1])
+ writeSmallArray(values[n1:regionCount])
+ if n2 > regionCount: # Padding
+            writeSmallArray([0] * (n2 - regionCount))  # use the array writer selected above
- n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
- writeBigArray(values[:n1])
- writeSmallArray(values[n1:regionCount])
- if n2 > regionCount: # Padding
- writer.writeSmallArray([0] * (n2 - regionCount))
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ xmlWriter.newline()
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- xmlWriter.newline()
+ def xmlRead(self, attrs, content, font):
+ return safeEval(attrs["value"])
- def xmlRead(self, attrs, content, font):
- return safeEval(attrs["value"])
class LookupFlag(UShort):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- flags = []
- if value & 0x01: flags.append("rightToLeft")
- if value & 0x02: flags.append("ignoreBaseGlyphs")
- if value & 0x04: flags.append("ignoreLigatures")
- if value & 0x08: flags.append("ignoreMarks")
- if value & 0x10: flags.append("useMarkFilteringSet")
- if value & 0xff00: flags.append("markAttachmentType[%i]" % (value >> 8))
- if flags:
- xmlWriter.comment(" ".join(flags))
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ flags = []
+ if value & 0x01:
+ flags.append("rightToLeft")
+ if value & 0x02:
+ flags.append("ignoreBaseGlyphs")
+ if value & 0x04:
+ flags.append("ignoreLigatures")
+ if value & 0x08:
+ flags.append("ignoreMarks")
+ if value & 0x10:
+ flags.append("useMarkFilteringSet")
+ if value & 0xFF00:
+ flags.append("markAttachmentType[%i]" % (value >> 8))
+ if flags:
+ xmlWriter.comment(" ".join(flags))
+ xmlWriter.newline()
class _UInt8Enum(UInt8):
- enumClass = NotImplemented
+ enumClass = NotImplemented
+
+ def read(self, reader, font, tableDict):
+ return self.enumClass(super().read(reader, font, tableDict))
+
+ @classmethod
+ def fromString(cls, value):
+ return getattr(cls.enumClass, value.upper())
- def read(self, reader, font, tableDict):
- return self.enumClass(super().read(reader, font, tableDict))
- @classmethod
- def fromString(cls, value):
- return getattr(cls.enumClass, value.upper())
- @classmethod
- def toString(cls, value):
- return cls.enumClass(value).name.lower()
+ @classmethod
+ def toString(cls, value):
+ return cls.enumClass(value).name.lower()
class ExtendMode(_UInt8Enum):
- enumClass = _ExtendMode
+ enumClass = _ExtendMode
class CompositeMode(_UInt8Enum):
- enumClass = _CompositeMode
+ enumClass = _CompositeMode
converterMapping = {
- # type class
- "int8": Int8,
- "int16": Short,
- "uint8": UInt8,
- "uint16": UShort,
- "uint24": UInt24,
- "uint32": ULong,
- "char64": Char64,
- "Flags32": Flags32,
- "VarIndex": VarIndex,
- "Version": Version,
- "Tag": Tag,
- "GlyphID": GlyphID,
- "GlyphID32": GlyphID32,
- "NameID": NameID,
- "DeciPoints": DeciPoints,
- "Fixed": Fixed,
- "F2Dot14": F2Dot14,
- "Angle": Angle,
- "BiasedAngle": BiasedAngle,
- "struct": Struct,
- "Offset": Table,
- "LOffset": LTable,
- "Offset24": Table24,
- "ValueRecord": ValueRecord,
- "DeltaValue": DeltaValue,
- "VarIdxMapValue": VarIdxMapValue,
- "VarDataValue": VarDataValue,
- "LookupFlag": LookupFlag,
- "ExtendMode": ExtendMode,
- "CompositeMode": CompositeMode,
- "STATFlags": STATFlags,
-
- # AAT
- "CIDGlyphMap": CIDGlyphMap,
- "GlyphCIDMap": GlyphCIDMap,
- "MortChain": StructWithLength,
- "MortSubtable": StructWithLength,
- "MorxChain": StructWithLength,
- "MorxSubtable": MorxSubtableConverter,
-
- # "Template" types
- "AATLookup": lambda C: partial(AATLookup, tableClass=C),
- "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C),
- "STXHeader": lambda C: partial(STXHeader, tableClass=C),
- "OffsetTo": lambda C: partial(Table, tableClass=C),
- "LOffsetTo": lambda C: partial(LTable, tableClass=C),
- "LOffset24To": lambda C: partial(Table24, tableClass=C),
+ # type class
+ "int8": Int8,
+ "int16": Short,
+ "uint8": UInt8,
+ "uint16": UShort,
+ "uint24": UInt24,
+ "uint32": ULong,
+ "char64": Char64,
+ "Flags32": Flags32,
+ "VarIndex": VarIndex,
+ "Version": Version,
+ "Tag": Tag,
+ "GlyphID": GlyphID,
+ "GlyphID32": GlyphID32,
+ "NameID": NameID,
+ "DeciPoints": DeciPoints,
+ "Fixed": Fixed,
+ "F2Dot14": F2Dot14,
+ "Angle": Angle,
+ "BiasedAngle": BiasedAngle,
+ "struct": Struct,
+ "Offset": Table,
+ "LOffset": LTable,
+ "Offset24": Table24,
+ "ValueRecord": ValueRecord,
+ "DeltaValue": DeltaValue,
+ "VarIdxMapValue": VarIdxMapValue,
+ "VarDataValue": VarDataValue,
+ "LookupFlag": LookupFlag,
+ "ExtendMode": ExtendMode,
+ "CompositeMode": CompositeMode,
+ "STATFlags": STATFlags,
+ # AAT
+ "CIDGlyphMap": CIDGlyphMap,
+ "GlyphCIDMap": GlyphCIDMap,
+ "MortChain": StructWithLength,
+ "MortSubtable": StructWithLength,
+ "MorxChain": StructWithLength,
+ "MorxSubtable": MorxSubtableConverter,
+ # "Template" types
+ "AATLookup": lambda C: partial(AATLookup, tableClass=C),
+ "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C),
+ "STXHeader": lambda C: partial(STXHeader, tableClass=C),
+ "OffsetTo": lambda C: partial(Table, tableClass=C),
+ "LOffsetTo": lambda C: partial(LTable, tableClass=C),
+ "LOffset24To": lambda C: partial(Table24, tableClass=C),
}
diff --git a/Lib/fontTools/ttLib/tables/otData.py b/Lib/fontTools/ttLib/tables/otData.py
index 2e65869f..56716824 100755..100644
--- a/Lib/fontTools/ttLib/tables/otData.py
+++ b/Lib/fontTools/ttLib/tables/otData.py
@@ -1,1957 +1,6236 @@
otData = [
-
- #
- # common
- #
-
- ('LookupOrder', []),
-
- ('ScriptList', [
- ('uint16', 'ScriptCount', None, None, 'Number of ScriptRecords'),
- ('struct', 'ScriptRecord', 'ScriptCount', 0, 'Array of ScriptRecords -listed alphabetically by ScriptTag'),
- ]),
-
- ('ScriptRecord', [
- ('Tag', 'ScriptTag', None, None, '4-byte ScriptTag identifier'),
- ('Offset', 'Script', None, None, 'Offset to Script table-from beginning of ScriptList'),
- ]),
-
- ('Script', [
- ('Offset', 'DefaultLangSys', None, None, 'Offset to DefaultLangSys table-from beginning of Script table-may be NULL'),
- ('uint16', 'LangSysCount', None, None, 'Number of LangSysRecords for this script-excluding the DefaultLangSys'),
- ('struct', 'LangSysRecord', 'LangSysCount', 0, 'Array of LangSysRecords-listed alphabetically by LangSysTag'),
- ]),
-
- ('LangSysRecord', [
- ('Tag', 'LangSysTag', None, None, '4-byte LangSysTag identifier'),
- ('Offset', 'LangSys', None, None, 'Offset to LangSys table-from beginning of Script table'),
- ]),
-
- ('LangSys', [
- ('Offset', 'LookupOrder', None, None, '= NULL (reserved for an offset to a reordering table)'),
- ('uint16', 'ReqFeatureIndex', None, None, 'Index of a feature required for this language system- if no required features = 0xFFFF'),
- ('uint16', 'FeatureCount', None, None, 'Number of FeatureIndex values for this language system-excludes the required feature'),
- ('uint16', 'FeatureIndex', 'FeatureCount', 0, 'Array of indices into the FeatureList-in arbitrary order'),
- ]),
-
- ('FeatureList', [
- ('uint16', 'FeatureCount', None, None, 'Number of FeatureRecords in this table'),
- ('struct', 'FeatureRecord', 'FeatureCount', 0, 'Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag'),
- ]),
-
- ('FeatureRecord', [
- ('Tag', 'FeatureTag', None, None, '4-byte feature identification tag'),
- ('Offset', 'Feature', None, None, 'Offset to Feature table-from beginning of FeatureList'),
- ]),
-
- ('Feature', [
- ('Offset', 'FeatureParams', None, None, '= NULL (reserved for offset to FeatureParams)'),
- ('uint16', 'LookupCount', None, None, 'Number of LookupList indices for this feature'),
- ('uint16', 'LookupListIndex', 'LookupCount', 0, 'Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)'),
- ]),
-
- ('FeatureParams', [
- ]),
-
- ('FeatureParamsSize', [
- ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'),
- ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'),
- ('NameID', 'SubfamilyNameID', None, None, 'Subfamily NameID.'),
- ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'),
- ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'),
- ]),
-
- ('FeatureParamsStylisticSet', [
- ('uint16', 'Version', None, None, 'Set to 0.'),
- ('NameID', 'UINameID', None, None, 'UI NameID.'),
- ]),
-
- ('FeatureParamsCharacterVariants', [
- ('uint16', 'Format', None, None, 'Set to 0.'),
- ('NameID', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'),
- ('NameID', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'),
- ('NameID', 'SampleTextNameID', None, None, 'Sample text NameID.'),
- ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'),
- ('NameID', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'),
- ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'),
- ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'),
- ]),
-
- ('LookupList', [
- ('uint16', 'LookupCount', None, None, 'Number of lookups in this table'),
- ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'),
- ]),
-
- ('Lookup', [
- ('uint16', 'LookupType', None, None, 'Different enumerations for GSUB and GPOS'),
- ('LookupFlag', 'LookupFlag', None, None, 'Lookup qualifiers'),
- ('uint16', 'SubTableCount', None, None, 'Number of SubTables for this lookup'),
- ('Offset', 'SubTable', 'SubTableCount', 0, 'Array of offsets to SubTables-from beginning of Lookup table'),
- ('uint16', 'MarkFilteringSet', None, 'LookupFlag & 0x0010', 'If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.'),
- ]),
-
- ('CoverageFormat1', [
- ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 1'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the GlyphArray'),
- ('GlyphID', 'GlyphArray', 'GlyphCount', 0, 'Array of GlyphIDs-in numerical order'),
- ]),
-
- ('CoverageFormat2', [
- ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'RangeCount', None, None, 'Number of RangeRecords'),
- ('struct', 'RangeRecord', 'RangeCount', 0, 'Array of glyph ranges-ordered by Start GlyphID'),
- ]),
-
- ('RangeRecord', [
- ('GlyphID', 'Start', None, None, 'First GlyphID in the range'),
- ('GlyphID', 'End', None, None, 'Last GlyphID in the range'),
- ('uint16', 'StartCoverageIndex', None, None, 'Coverage Index of first GlyphID in range'),
- ]),
-
- ('ClassDefFormat1', [
- ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 1'),
- ('GlyphID', 'StartGlyph', None, None, 'First GlyphID of the ClassValueArray'),
- ('uint16', 'GlyphCount', None, None, 'Size of the ClassValueArray'),
- ('uint16', 'ClassValueArray', 'GlyphCount', 0, 'Array of Class Values-one per GlyphID'),
- ]),
-
- ('ClassDefFormat2', [
- ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'ClassRangeCount', None, None, 'Number of ClassRangeRecords'),
- ('struct', 'ClassRangeRecord', 'ClassRangeCount', 0, 'Array of ClassRangeRecords-ordered by Start GlyphID'),
- ]),
-
- ('ClassRangeRecord', [
- ('GlyphID', 'Start', None, None, 'First GlyphID in the range'),
- ('GlyphID', 'End', None, None, 'Last GlyphID in the range'),
- ('uint16', 'Class', None, None, 'Applied to all glyphs in the range'),
- ]),
-
- ('Device', [
- ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'),
- ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'),
- ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'),
- ('DeltaValue', 'DeltaValue', '', 'DeltaFormat in (1,2,3)', 'Array of compressed data'),
- ]),
-
-
- #
- # gpos
- #
-
- ('GPOS', [
- ('Version', 'Version', None, None, 'Version of the GPOS table- 0x00010000 or 0x00010001'),
- ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'),
- ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'),
- ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'),
- ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GPOS table'),
- ]),
-
- ('SinglePosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'),
- ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'),
- ('ValueRecord', 'Value', None, None, 'Defines positioning value(s)-applied to all glyphs in the Coverage table'),
- ]),
-
- ('SinglePosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'),
- ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'),
- ('uint16', 'ValueCount', None, None, 'Number of ValueRecords'),
- ('ValueRecord', 'Value', 'ValueCount', 0, 'Array of ValueRecords-positioning values applied to glyphs'),
- ]),
-
- ('PairPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair'),
- ('uint16', 'ValueFormat1', None, None, 'Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)'),
- ('uint16', 'ValueFormat2', None, None, 'Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)'),
- ('uint16', 'PairSetCount', None, None, 'Number of PairSet tables'),
- ('Offset', 'PairSet', 'PairSetCount', 0, 'Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index'),
- ]),
-
- ('PairSet', [
- ('uint16', 'PairValueCount', None, None, 'Number of PairValueRecords'),
- ('struct', 'PairValueRecord', 'PairValueCount', 0, 'Array of PairValueRecords-ordered by GlyphID of the second glyph'),
- ]),
-
- ('PairValueRecord', [
- ('GlyphID', 'SecondGlyph', None, None, 'GlyphID of second glyph in the pair-first glyph is listed in the Coverage table'),
- ('ValueRecord', 'Value1', None, None, 'Positioning data for the first glyph in the pair'),
- ('ValueRecord', 'Value2', None, None, 'Positioning data for the second glyph in the pair'),
- ]),
-
- ('PairPosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair'),
- ('uint16', 'ValueFormat1', None, None, 'ValueRecord definition-for the first glyph of the pair-may be zero (0)'),
- ('uint16', 'ValueFormat2', None, None, 'ValueRecord definition-for the second glyph of the pair-may be zero (0)'),
- ('Offset', 'ClassDef1', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair'),
- ('Offset', 'ClassDef2', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair'),
- ('uint16', 'Class1Count', None, None, 'Number of classes in ClassDef1 table-includes Class0'),
- ('uint16', 'Class2Count', None, None, 'Number of classes in ClassDef2 table-includes Class0'),
- ('struct', 'Class1Record', 'Class1Count', 0, 'Array of Class1 records-ordered by Class1'),
- ]),
-
- ('Class1Record', [
- ('struct', 'Class2Record', 'Class2Count', 0, 'Array of Class2 records-ordered by Class2'),
- ]),
-
- ('Class2Record', [
- ('ValueRecord', 'Value1', None, None, 'Positioning for first glyph-empty if ValueFormat1 = 0'),
- ('ValueRecord', 'Value2', None, None, 'Positioning for second glyph-empty if ValueFormat2 = 0'),
- ]),
-
- ('CursivePosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of CursivePos subtable'),
- ('uint16', 'EntryExitCount', None, None, 'Number of EntryExit records'),
- ('struct', 'EntryExitRecord', 'EntryExitCount', 0, 'Array of EntryExit records-in Coverage Index order'),
- ]),
-
- ('EntryExitRecord', [
- ('Offset', 'EntryAnchor', None, None, 'Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL'),
- ('Offset', 'ExitAnchor', None, None, 'Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL'),
- ]),
-
- ('MarkBasePosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'MarkCoverage', None, None, 'Offset to MarkCoverage table-from beginning of MarkBasePos subtable'),
- ('Offset', 'BaseCoverage', None, None, 'Offset to BaseCoverage table-from beginning of MarkBasePos subtable'),
- ('uint16', 'ClassCount', None, None, 'Number of classes defined for marks'),
- ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkBasePos subtable'),
- ('Offset', 'BaseArray', None, None, 'Offset to BaseArray table-from beginning of MarkBasePos subtable'),
- ]),
-
- ('BaseArray', [
- ('uint16', 'BaseCount', None, None, 'Number of BaseRecords'),
- ('struct', 'BaseRecord', 'BaseCount', 0, 'Array of BaseRecords-in order of BaseCoverage Index'),
- ]),
-
- ('BaseRecord', [
- ('Offset', 'BaseAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based'),
- ]),
-
- ('MarkLigPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'MarkCoverage', None, None, 'Offset to Mark Coverage table-from beginning of MarkLigPos subtable'),
- ('Offset', 'LigatureCoverage', None, None, 'Offset to Ligature Coverage table-from beginning of MarkLigPos subtable'),
- ('uint16', 'ClassCount', None, None, 'Number of defined mark classes'),
- ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkLigPos subtable'),
- ('Offset', 'LigatureArray', None, None, 'Offset to LigatureArray table-from beginning of MarkLigPos subtable'),
- ]),
-
- ('LigatureArray', [
- ('uint16', 'LigatureCount', None, None, 'Number of LigatureAttach table offsets'),
- ('Offset', 'LigatureAttach', 'LigatureCount', 0, 'Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index'),
- ]),
-
- ('LigatureAttach', [
- ('uint16', 'ComponentCount', None, None, 'Number of ComponentRecords in this ligature'),
- ('struct', 'ComponentRecord', 'ComponentCount', 0, 'Array of Component records-ordered in writing direction'),
- ]),
-
- ('ComponentRecord', [
- ('Offset', 'LigatureAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array'),
- ]),
-
- ('MarkMarkPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Mark1Coverage', None, None, 'Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable'),
- ('Offset', 'Mark2Coverage', None, None, 'Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable'),
- ('uint16', 'ClassCount', None, None, 'Number of Combining Mark classes defined'),
- ('Offset', 'Mark1Array', None, None, 'Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable'),
- ('Offset', 'Mark2Array', None, None, 'Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable'),
- ]),
-
- ('Mark2Array', [
- ('uint16', 'Mark2Count', None, None, 'Number of Mark2 records'),
- ('struct', 'Mark2Record', 'Mark2Count', 0, 'Array of Mark2 records-in Coverage order'),
- ]),
-
- ('Mark2Record', [
- ('Offset', 'Mark2Anchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array'),
- ]),
-
- ('PosLookupRecord', [
- ('uint16', 'SequenceIndex', None, None, 'Index to input glyph sequence-first glyph = 0'),
- ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'),
- ]),
-
- ('ContextPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
- ('uint16', 'PosRuleSetCount', None, None, 'Number of PosRuleSet tables'),
- ('Offset', 'PosRuleSet', 'PosRuleSetCount', 0, 'Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'),
- ]),
-
- ('PosRuleSet', [
- ('uint16', 'PosRuleCount', None, None, 'Number of PosRule tables'),
- ('Offset', 'PosRule', 'PosRuleCount', 0, 'Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference'),
- ]),
-
- ('PosRule', [
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the Input glyph sequence'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-starting with the second glyph'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
- ]),
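The Input entry above, with count field 'GlyphCount' and adjustment -1, illustrates the convention these 5-tuples appear to follow: (type, name, countField, countAdjustment, description), where a repeated field holds countField + countAdjustment elements (the first input glyph is matched through the Coverage table, so only GlyphCount - 1 IDs are stored). A tiny sketch of that arithmetic, with hypothetical names:

def stored_array_length(parent, count_field, count_adjustment):
    # e.g. PosRule.Input: GlyphCount counts the whole input sequence including
    # the first glyph, but the first glyph lives in the Coverage table, so the
    # stored array holds GlyphCount - 1 glyph IDs.
    return parent[count_field] + count_adjustment

rule = {'GlyphCount': 3}
assert stored_array_length(rule, 'GlyphCount', -1) == 2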
-
- ('ContextPosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
- ('Offset', 'ClassDef', None, None, 'Offset to ClassDef table-from beginning of ContextPos subtable'),
- ('uint16', 'PosClassSetCount', None, None, 'Number of PosClassSet tables'),
- ('Offset', 'PosClassSet', 'PosClassSetCount', 0, 'Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL'),
- ]),
-
- ('PosClassSet', [
- ('uint16', 'PosClassRuleCount', None, None, 'Number of PosClassRule tables'),
- ('Offset', 'PosClassRule', 'PosClassRuleCount', 0, 'Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference'),
- ]),
-
- ('PosClassRule', [
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs to be matched'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph sequence'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
- ]),
-
- ('ContextPosFormat3', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input sequence'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage tables-from beginning of ContextPos subtable'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
- ]),
-
- ('ChainContextPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ChainContextPos subtable'),
- ('uint16', 'ChainPosRuleSetCount', None, None, 'Number of ChainPosRuleSet tables'),
- ('Offset', 'ChainPosRuleSet', 'ChainPosRuleSetCount', 0, 'Array of offsets to ChainPosRuleSet tables-from beginning of ChainContextPos subtable-ordered by Coverage Index'),
- ]),
-
- ('ChainPosRuleSet', [
- ('uint16', 'ChainPosRuleCount', None, None, 'Number of ChainPosRule tables'),
- ('Offset', 'ChainPosRule', 'ChainPosRuleCount', 0, 'Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference'),
- ]),
-
- ('ChainPosRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking GlyphIDs (to be matched before the input sequence)'),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'),
- ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'),
- ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead GlyphIDs (to be matched after the input sequence)'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'),
- ]),
-
- ('ChainContextPosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ChainContextPos subtable'),
- ('Offset', 'BacktrackClassDef', None, None, 'Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable'),
- ('Offset', 'InputClassDef', None, None, 'Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable'),
- ('Offset', 'LookAheadClassDef', None, None, 'Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable'),
- ('uint16', 'ChainPosClassSetCount', None, None, 'Number of ChainPosClassSet tables'),
- ('Offset', 'ChainPosClassSet', 'ChainPosClassSetCount', 0, 'Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL'),
- ]),
-
- ('ChainPosClassSet', [
- ('uint16', 'ChainPosClassRuleCount', None, None, 'Number of ChainPosClassRule tables'),
- ('Offset', 'ChainPosClassRule', 'ChainPosClassRuleCount', 0, 'Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference'),
- ]),
-
- ('ChainPosClassRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes (to be matched before the input sequence)'),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'),
- ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes (start with second class; to be matched with the input glyph sequence)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'),
- ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes (to be matched after the input sequence)'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'),
- ]),
-
- ('ChainContextPosFormat3', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
- ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
- ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'),
- ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
- ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords, in design order'),
- ]),
-
- ('ExtensionPosFormat1', [
- ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'),
- ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'),
- ('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'),
- ]),
-
-# ('ValueRecord', [
-# ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'),
-# ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'),
-# ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'),
-# ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'),
-# ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'),
-# ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'),
-# ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'),
-# ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'),
-# ]),
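The commented-out ValueRecord above lists the eight optional positioning fields; a ValueFormat word selects which of them are actually present, one bit per field in the order listed. A small decoding sketch (illustration only, not the fontTools ValueRecord/ValueFormat implementation):

# Bit i of a ValueFormat selects the i-th optional field, in the order the
# fields appear in the commented-out ValueRecord above.
VALUE_RECORD_FIELDS = [
    'XPlacement', 'YPlacement', 'XAdvance', 'YAdvance',
    'XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice',
]

def value_format_fields(value_format):
    return [name for bit, name in enumerate(VALUE_RECORD_FIELDS)
            if value_format & (1 << bit)]

# ValueFormat 0x0004 selects only XAdvance (the common kerning case);
# ValueFormat 0 means the record is empty.
assert value_format_fields(0x0004) == ['XAdvance']
assert value_format_fields(0) == []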
-
- ('AnchorFormat1', [
- ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 1'),
- ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
- ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
- ]),
-
- ('AnchorFormat2', [
- ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 2'),
- ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
- ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
- ('uint16', 'AnchorPoint', None, None, 'Index to glyph contour point'),
- ]),
-
- ('AnchorFormat3', [
- ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 3'),
- ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
- ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
- ('Offset', 'XDeviceTable', None, None, 'Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)'),
- ('Offset', 'YDeviceTable', None, None, 'Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)'),
- ]),
-
- ('MarkArray', [
- ('uint16', 'MarkCount', None, None, 'Number of MarkRecords'),
- ('struct', 'MarkRecord', 'MarkCount', 0, 'Array of MarkRecords-in Coverage order'),
- ]),
-
- ('MarkRecord', [
- ('uint16', 'Class', None, None, 'Class defined for this mark'),
- ('Offset', 'MarkAnchor', None, None, 'Offset to Anchor table-from beginning of MarkArray table'),
- ]),
-
-
- #
- # gsub
- #
-
- ('GSUB', [
- ('Version', 'Version', None, None, 'Version of the GSUB table- 0x00010000 or 0x00010001'),
- ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'),
- ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'),
- ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'),
- ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GSUB table'),
- ]),
-
- ('SingleSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'DeltaGlyphID', None, None, 'Add to original GlyphID modulo 65536 to get substitute GlyphID'),
- ]),
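As the DeltaGlyphID description says, a format 1 single substitution is just modular arithmetic on the glyph ID; a short sketch:

def single_subst_format1(glyph_id, delta_glyph_id):
    # "Add to original GlyphID modulo 65536 to get substitute GlyphID";
    # the modulo lets a positive DeltaGlyphID stand in for a negative shift.
    return (glyph_id + delta_glyph_id) % 65536

assert single_subst_format1(10, 5) == 15
assert single_subst_format1(65535, 2) == 1  # wraps around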
-
- ('SingleSubstFormat2', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'),
- ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage Index'),
- ]),
-
- ('MultipleSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'SequenceCount', None, None, 'Number of Sequence table offsets in the Sequence array'),
- ('Offset', 'Sequence', 'SequenceCount', 0, 'Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('Sequence', [
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array. This should always be greater than 0.'),
- ('GlyphID', 'Substitute', 'GlyphCount', 0, 'String of GlyphIDs to substitute'),
- ]),
-
- ('AlternateSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'AlternateSetCount', None, None, 'Number of AlternateSet tables'),
- ('Offset', 'AlternateSet', 'AlternateSetCount', 0, 'Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('AlternateSet', [
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Alternate array'),
- ('GlyphID', 'Alternate', 'GlyphCount', 0, 'Array of alternate GlyphIDs-in arbitrary order'),
- ]),
-
- ('LigatureSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'LigSetCount', None, None, 'Number of LigatureSet tables'),
- ('Offset', 'LigatureSet', 'LigSetCount', 0, 'Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('LigatureSet', [
- ('uint16', 'LigatureCount', None, None, 'Number of Ligature tables'),
- ('Offset', 'Ligature', 'LigatureCount', 0, 'Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference'),
- ]),
-
- ('Ligature', [
- ('GlyphID', 'LigGlyph', None, None, 'GlyphID of ligature to substitute'),
- ('uint16', 'CompCount', None, None, 'Number of components in the ligature'),
- ('GlyphID', 'Component', 'CompCount', -1, 'Array of component GlyphIDs-start with the second component-ordered in writing direction'),
- ]),
-
- ('SubstLookupRecord', [
- ('uint16', 'SequenceIndex', None, None, 'Index into current glyph sequence-first glyph = 0'),
- ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'),
- ]),
-
- ('ContextSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'SubRuleSetCount', None, None, 'Number of SubRuleSet tables-must equal GlyphCount in Coverage table'),
- ('Offset', 'SubRuleSet', 'SubRuleSetCount', 0, 'Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('SubRuleSet', [
- ('uint16', 'SubRuleCount', None, None, 'Number of SubRule tables'),
- ('Offset', 'SubRule', 'SubRuleCount', 0, 'Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference'),
- ]),
-
- ('SubRule', [
- ('uint16', 'GlyphCount', None, None, 'Total number of glyphs in input glyph sequence-includes the first glyph'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-start with second glyph'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'),
- ]),
-
- ('ContextSubstFormat2', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('Offset', 'ClassDef', None, None, 'Offset to glyph ClassDef table-from beginning of Substitution table'),
- ('uint16', 'SubClassSetCount', None, None, 'Number of SubClassSet tables'),
- ('Offset', 'SubClassSet', 'SubClassSetCount', 0, 'Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL'),
- ]),
-
- ('SubClassSet', [
- ('uint16', 'SubClassRuleCount', None, None, 'Number of SubClassRule tables'),
- ('Offset', 'SubClassRule', 'SubClassRuleCount', 0, 'Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference'),
- ]),
-
- ('SubClassRule', [
- ('uint16', 'GlyphCount', None, None, 'Total number of classes specified for the context in the rule-includes the first class'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph class sequence'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of Substitution lookups-in design order'),
- ]),
-
- ('ContextSubstFormat3', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input glyph sequence'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'),
- ]),
-
- ('ChainContextSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'ChainSubRuleSetCount', None, None, 'Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table'),
- ('Offset', 'ChainSubRuleSet', 'ChainSubRuleSetCount', 0, 'Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('ChainSubRuleSet', [
- ('uint16', 'ChainSubRuleCount', None, None, 'Number of ChainSubRule tables'),
- ('Offset', 'ChainSubRule', 'ChainSubRuleCount', 0, 'Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference'),
- ]),
-
- ('ChainSubRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking GlyphIDs (to be matched before the input sequence)'),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'),
- ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'),
- ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead GlyphIDs (to be matched after the input sequence)'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'),
- ]),
-
- ('ChainContextSubstFormat2', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('Offset', 'BacktrackClassDef', None, None, 'Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table'),
- ('Offset', 'InputClassDef', None, None, 'Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table'),
- ('Offset', 'LookAheadClassDef', None, None, 'Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table'),
- ('uint16', 'ChainSubClassSetCount', None, None, 'Number of ChainSubClassSet tables'),
- ('Offset', 'ChainSubClassSet', 'ChainSubClassSetCount', 0, 'Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL'),
- ]),
-
- ('ChainSubClassSet', [
- ('uint16', 'ChainSubClassRuleCount', None, None, 'Number of ChainSubClassRule tables'),
- ('Offset', 'ChainSubClassRule', 'ChainSubClassRuleCount', 0, 'Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference'),
- ]),
-
- ('ChainSubClassRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes (to be matched before the input sequence)'),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'),
- ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes (start with second class; to be matched with the input glyph sequence)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'),
- ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes (to be matched after the input sequence)'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'),
- ]),
-
- ('ChainContextSubstFormat3', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
- ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
- ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'),
- ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
- ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords, in design order'),
- ]),
-
- ('ExtensionSubstFormat1', [
- ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'),
- ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'),
- ('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'),
- ]),
-
- ('ReverseChainSingleSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, 0, 'Offset to Coverage table - from beginning of Substitution table'),
- ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
- ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
- ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'),
- ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage index'),
- ]),
-
- #
- # gdef
- #
-
- ('GDEF', [
- ('Version', 'Version', None, None, 'Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003'),
- ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'MarkGlyphSetsDef', None, 'Version >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
- ('LOffset', 'VarStore', None, 'Version >= 0x00010003', 'Offset to variation store (may be NULL)'),
- ]),
-
- ('AttachList', [
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of AttachList table'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs with attachment points'),
- ('Offset', 'AttachPoint', 'GlyphCount', 0, 'Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order'),
- ]),
-
- ('AttachPoint', [
- ('uint16', 'PointCount', None, None, 'Number of attachment points on this glyph'),
- ('uint16', 'PointIndex', 'PointCount', 0, 'Array of contour point indices -in increasing numerical order'),
- ]),
-
- ('LigCaretList', [
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of LigCaretList table'),
- ('uint16', 'LigGlyphCount', None, None, 'Number of ligature glyphs'),
- ('Offset', 'LigGlyph', 'LigGlyphCount', 0, 'Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order'),
- ]),
-
- ('LigGlyph', [
- ('uint16', 'CaretCount', None, None, 'Number of CaretValues for this ligature (components - 1)'),
- ('Offset', 'CaretValue', 'CaretCount', 0, 'Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order'),
- ]),
-
- ('CaretValueFormat1', [
- ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 1'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ]),
-
- ('CaretValueFormat2', [
- ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'CaretValuePoint', None, None, 'Contour point index on glyph'),
- ]),
-
- ('CaretValueFormat3', [
- ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 3'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value-from beginning of CaretValue table'),
- ]),
-
- ('MarkGlyphSetsDef', [
- ('uint16', 'MarkSetTableFormat', None, None, 'Format identifier == 1'),
- ('uint16', 'MarkSetCount', None, None, 'Number of mark sets defined'),
- ('LOffset', 'Coverage', 'MarkSetCount', 0, 'Array of offsets to mark set coverage tables.'),
- ]),
-
- #
- # base
- #
-
- ('BASE', [
- ('Version', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'),
- ('Offset', 'HorizAxis', None, None, 'Offset to horizontal Axis table-from beginning of BASE table-may be NULL'),
- ('Offset', 'VertAxis', None, None, 'Offset to vertical Axis table-from beginning of BASE table-may be NULL'),
- ('LOffset', 'VarStore', None, 'Version >= 0x00010001', 'Offset to variation store (may be NULL)'),
- ]),
-
- ('Axis', [
- ('Offset', 'BaseTagList', None, None, 'Offset to BaseTagList table-from beginning of Axis table-may be NULL'),
- ('Offset', 'BaseScriptList', None, None, 'Offset to BaseScriptList table-from beginning of Axis table'),
- ]),
-
- ('BaseTagList', [
- ('uint16', 'BaseTagCount', None, None, 'Number of baseline identification tags in this text direction-may be zero (0)'),
- ('Tag', 'BaselineTag', 'BaseTagCount', 0, 'Array of 4-byte baseline identification tags-must be in alphabetical order'),
- ]),
-
- ('BaseScriptList', [
- ('uint16', 'BaseScriptCount', None, None, 'Number of BaseScriptRecords defined'),
- ('struct', 'BaseScriptRecord', 'BaseScriptCount', 0, 'Array of BaseScriptRecords-in alphabetical order by BaseScriptTag'),
- ]),
-
- ('BaseScriptRecord', [
- ('Tag', 'BaseScriptTag', None, None, '4-byte script identification tag'),
- ('Offset', 'BaseScript', None, None, 'Offset to BaseScript table-from beginning of BaseScriptList'),
- ]),
-
- ('BaseScript', [
- ('Offset', 'BaseValues', None, None, 'Offset to BaseValues table-from beginning of BaseScript table-may be NULL'),
- ('Offset', 'DefaultMinMax', None, None, 'Offset to MinMax table- from beginning of BaseScript table-may be NULL'),
- ('uint16', 'BaseLangSysCount', None, None, 'Number of BaseLangSysRecords defined-may be zero (0)'),
- ('struct', 'BaseLangSysRecord', 'BaseLangSysCount', 0, 'Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag'),
- ]),
-
- ('BaseLangSysRecord', [
- ('Tag', 'BaseLangSysTag', None, None, '4-byte language system identification tag'),
- ('Offset', 'MinMax', None, None, 'Offset to MinMax table-from beginning of BaseScript table'),
- ]),
-
- ('BaseValues', [
- ('uint16', 'DefaultIndex', None, None, 'Index number of default baseline for this script-equals index position of baseline tag in the BaselineTag array of the BaseTagList'),
- ('uint16', 'BaseCoordCount', None, None, 'Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList'),
- ('Offset', 'BaseCoord', 'BaseCoordCount', 0, 'Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList'),
- ]),
-
- ('MinMax', [
- ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL'),
- ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL'),
- ('uint16', 'FeatMinMaxCount', None, None, 'Number of FeatMinMaxRecords-may be zero (0)'),
- ('struct', 'FeatMinMaxRecord', 'FeatMinMaxCount', 0, 'Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag'),
- ]),
-
- ('FeatMinMaxRecord', [
- ('Tag', 'FeatureTableTag', None, None, '4-byte feature identification tag-must match FeatureTag in FeatureList'),
- ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL'),
- ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL'),
- ]),
-
- ('BaseCoordFormat1', [
- ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 1'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ]),
-
- ('BaseCoordFormat2', [
- ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 2'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ('GlyphID', 'ReferenceGlyph', None, None, 'GlyphID of control glyph'),
- ('uint16', 'BaseCoordPoint', None, None, 'Index of contour point on the ReferenceGlyph'),
- ]),
-
- ('BaseCoordFormat3', [
- ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 3'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value'),
- ]),
-
-
- #
- # jstf
- #
-
- ('JSTF', [
- ('Version', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'),
- ('uint16', 'JstfScriptCount', None, None, 'Number of JstfScriptRecords in this table'),
- ('struct', 'JstfScriptRecord', 'JstfScriptCount', 0, 'Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag'),
- ]),
-
- ('JstfScriptRecord', [
- ('Tag', 'JstfScriptTag', None, None, '4-byte JstfScript identification'),
- ('Offset', 'JstfScript', None, None, 'Offset to JstfScript table-from beginning of JSTF Header'),
- ]),
-
- ('JstfScript', [
- ('Offset', 'ExtenderGlyph', None, None, 'Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL'),
- ('Offset', 'DefJstfLangSys', None, None, 'Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL'),
- ('uint16', 'JstfLangSysCount', None, None, 'Number of JstfLangSysRecords in this table- may be zero (0)'),
- ('struct', 'JstfLangSysRecord', 'JstfLangSysCount', 0, 'Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag'),
- ]),
-
- ('JstfLangSysRecord', [
- ('Tag', 'JstfLangSysTag', None, None, '4-byte JstfLangSys identifier'),
- ('Offset', 'JstfLangSys', None, None, 'Offset to JstfLangSys table-from beginning of JstfScript table'),
- ]),
-
- ('ExtenderGlyph', [
- ('uint16', 'GlyphCount', None, None, 'Number of Extender Glyphs in this script'),
- ('GlyphID', 'ExtenderGlyph', 'GlyphCount', 0, 'GlyphIDs-in increasing numerical order'),
- ]),
-
- ('JstfLangSys', [
- ('uint16', 'JstfPriorityCount', None, None, 'Number of JstfPriority tables'),
- ('Offset', 'JstfPriority', 'JstfPriorityCount', 0, 'Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order'),
- ]),
-
- ('JstfPriority', [
- ('Offset', 'ShrinkageEnableGSUB', None, None, 'Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageDisableGSUB', None, None, 'Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageEnableGPOS', None, None, 'Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageDisableGPOS', None, None, 'Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageJstfMax', None, None, 'Offset to Shrinkage JstfMax table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ExtensionEnableGSUB', None, None, 'Offset to Extension Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ExtensionDisableGSUB', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ExtensionEnableGPOS', None, None, 'Offset to Extension Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ExtensionDisableGPOS', None, None, 'Offset to Extension Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ExtensionJstfMax', None, None, 'Offset to Extension JstfMax table-from beginning of JstfPriority table-may be NULL'),
- ]),
-
- ('JstfGSUBModList', [
- ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'),
- ('uint16', 'GSUBLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GSUB-in increasing numerical order'),
- ]),
-
- ('JstfGPOSModList', [
- ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'),
- ('uint16', 'GPOSLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GPOS-in increasing numerical order'),
- ]),
-
- ('JstfMax', [
- ('uint16', 'LookupCount', None, None, 'Number of lookup Indices for this modification'),
- ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'),
- ]),
-
-
- #
- # STAT
- #
- ('STAT', [
- ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000, currently 0x00010002.'),
- ('uint16', 'DesignAxisRecordSize', None, None, 'Size in bytes of each design axis record'),
- ('uint16', 'DesignAxisCount', None, None, 'Number of design axis records'),
- ('LOffsetTo(AxisRecordArray)', 'DesignAxisRecord', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the design axes array'),
- ('uint16', 'AxisValueCount', None, None, 'Number of axis value tables'),
- ('LOffsetTo(AxisValueArray)', 'AxisValueArray', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the axes value offset array'),
- ('NameID', 'ElidedFallbackNameID', None, 'Version >= 0x00010001', 'NameID to use when all style attributes are elided.'),
- ]),
-
- ('AxisRecordArray', [
- ('AxisRecord', 'Axis', 'DesignAxisCount', 0, 'Axis records'),
- ]),
-
- ('AxisRecord', [
- ('Tag', 'AxisTag', None, None, 'A tag identifying the axis of design variation'),
- ('NameID', 'AxisNameID', None, None, 'The name ID for entries in the "name" table that provide a display string for this axis'),
- ('uint16', 'AxisOrdering', None, None, 'A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names'),
- ('uint8', 'MoreBytes', 'DesignAxisRecordSize', -8, 'Extra bytes. Set to empty array.'),
- ]),
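The MoreBytes entry pads each axis record out to DesignAxisRecordSize: the three defined fields take 8 bytes (a 4-byte Tag plus two uint16 values), and anything beyond that is carried as raw bytes so larger records from future STAT revisions round-trip. A sketch of the size arithmetic, for illustration only:

KNOWN_AXIS_RECORD_BYTES = 4 + 2 + 2  # AxisTag + AxisNameID + AxisOrdering

def axis_record_extra_bytes(design_axis_record_size):
    # Mirrors the ('uint8', 'MoreBytes', 'DesignAxisRecordSize', -8, ...) entry:
    # the extra-byte array holds DesignAxisRecordSize - 8 elements.
    return design_axis_record_size - KNOWN_AXIS_RECORD_BYTES

assert axis_record_extra_bytes(8) == 0
assert axis_record_extra_bytes(12) == 4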
-
- ('AxisValueArray', [
- ('Offset', 'AxisValue', 'AxisValueCount', 0, 'Axis values'),
- ]),
-
- ('AxisValueFormat1', [
- ('uint16', 'Format', None, None, 'Format, = 1'),
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('Fixed', 'Value', None, None, ''),
- ]),
-
- ('AxisValueFormat2', [
- ('uint16', 'Format', None, None, 'Format, = 2'),
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('Fixed', 'NominalValue', None, None, ''),
- ('Fixed', 'RangeMinValue', None, None, ''),
- ('Fixed', 'RangeMaxValue', None, None, ''),
- ]),
-
- ('AxisValueFormat3', [
- ('uint16', 'Format', None, None, 'Format, = 3'),
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('Fixed', 'Value', None, None, ''),
- ('Fixed', 'LinkedValue', None, None, ''),
- ]),
-
- ('AxisValueFormat4', [
- ('uint16', 'Format', None, None, 'Format, = 4'),
- ('uint16', 'AxisCount', None, None, 'The total number of axes contributing to this axis-values combination.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('struct', 'AxisValueRecord', 'AxisCount', 0, 'Array of AxisValue records that provide the combination of axis values, one for each contributing axis. '),
- ]),
-
- ('AxisValueRecord', [
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('Fixed', 'Value', None, None, 'A numeric value for this attribute value.'),
- ]),
-
-
- #
- # Variation fonts
- #
-
- # GSUB/GPOS FeatureVariations
-
- ('FeatureVariations', [
- ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'),
- ('uint32', 'FeatureVariationCount', None, None, 'Number of records in the FeatureVariationRecord array'),
- ('struct', 'FeatureVariationRecord', 'FeatureVariationCount', 0, 'Array of FeatureVariationRecord'),
- ]),
-
- ('FeatureVariationRecord', [
- ('LOffset', 'ConditionSet', None, None, 'Offset to a ConditionSet table, from beginning of the FeatureVariations table.'),
- ('LOffset', 'FeatureTableSubstitution', None, None, 'Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table'),
- ]),
-
- ('ConditionSet', [
- ('uint16', 'ConditionCount', None, None, 'Number of condition tables in the ConditionTable array'),
- ('LOffset', 'ConditionTable', 'ConditionCount', 0, 'Array of condition tables.'),
- ]),
-
- ('ConditionTableFormat1', [
- ('uint16', 'Format', None, None, 'Format, = 1'),
- ('uint16', 'AxisIndex', None, None, 'Index for the variation axis within the fvar table, base 0.'),
- ('F2Dot14', 'FilterRangeMinValue', None, None, 'Minimum normalized axis value of the font variation instances that satisfy this condition.'),
- ('F2Dot14', 'FilterRangeMaxValue', None, None, 'Maximum value that satisfies this condition.'),
- ]),
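A format 1 condition simply brackets one normalized axis coordinate; a minimal sketch of the test (hypothetical helper, not the fontTools feature-variations code):

def condition_format1_matches(normalized_value, filter_min, filter_max):
    # FilterRangeMinValue / FilterRangeMaxValue are F2Dot14 values; the
    # condition is satisfied when the instance's normalized coordinate on
    # AxisIndex lies in the closed range [min, max].
    return filter_min <= normalized_value <= filter_max

assert condition_format1_matches(0.7, 0.5, 1.0)
assert not condition_format1_matches(0.2, 0.5, 1.0)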
-
- ('FeatureTableSubstitution', [
- ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'),
- ('uint16', 'SubstitutionCount', None, None, 'Number of records in the SubstitutionRecord array'),
- ('FeatureTableSubstitutionRecord', 'SubstitutionRecord', 'SubstitutionCount', 0, 'Array of FeatureTableSubstitutionRecord'),
- ]),
-
- ('FeatureTableSubstitutionRecord', [
- ('uint16', 'FeatureIndex', None, None, 'The feature table index to match.'),
- ('LOffset', 'Feature', None, None, 'Offset to an alternate feature table, from start of the FeatureTableSubstitution table.'),
- ]),
-
- # VariationStore
-
- ('VarRegionAxis', [
- ('F2Dot14', 'StartCoord', None, None, ''),
- ('F2Dot14', 'PeakCoord', None, None, ''),
- ('F2Dot14', 'EndCoord', None, None, ''),
- ]),
-
- ('VarRegion', [
- ('struct', 'VarRegionAxis', 'RegionAxisCount', 0, ''),
- ]),
-
- ('VarRegionList', [
- ('uint16', 'RegionAxisCount', None, None, ''),
- ('uint16', 'RegionCount', None, None, ''),
- ('VarRegion', 'Region', 'RegionCount', 0, ''),
- ]),
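Each VarRegionAxis defines a tent over the normalized axis: zero outside [StartCoord, EndCoord], one at PeakCoord, linear in between, and a region's weight is the product of its per-axis scalars. A simplified sketch of that standard variation math, written here only for illustration:

def axis_scalar(coord, start, peak, end):
    # Support scalar for one VarRegionAxis (StartCoord, PeakCoord, EndCoord).
    if peak == 0 or coord == peak:
        return 1.0
    if coord <= start or coord >= end:
        return 0.0
    if coord < peak:
        return (coord - start) / (peak - start)
    return (end - coord) / (end - peak)

def region_scalar(coords, region_axes):
    # Product over RegionAxisCount axes; deltas in VarData rows are scaled by this.
    scalar = 1.0
    for coord, (start, peak, end) in zip(coords, region_axes):
        scalar *= axis_scalar(coord, start, peak, end)
    return scalar

assert region_scalar([0.5], [(0.0, 1.0, 1.0)]) == 0.5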
-
- ('VarData', [
- ('uint16', 'ItemCount', None, None, ''),
- ('uint16', 'NumShorts', None, None, ''),
- ('uint16', 'VarRegionCount', None, None, ''),
- ('uint16', 'VarRegionIndex', 'VarRegionCount', 0, ''),
- ('VarDataValue', 'Item', 'ItemCount', 0, ''),
- ]),
-
- ('VarStore', [
- ('uint16', 'Format', None, None, 'Set to 1.'),
- ('LOffset', 'VarRegionList', None, None, ''),
- ('uint16', 'VarDataCount', None, None, ''),
- ('LOffset', 'VarData', 'VarDataCount', 0, ''),
- ]),
-
- # Variation helpers
-
- ('VarIdxMap', [
- ('uint16', 'EntryFormat', None, None, ''), # Automatically computed
- ('uint16', 'MappingCount', None, None, ''), # Automatically computed
- ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
- ]),
-
- ('DeltaSetIndexMapFormat0', [
- ('uint8', 'Format', None, None, 'Format of the DeltaSetIndexMap = 0'),
- ('uint8', 'EntryFormat', None, None, ''), # Automatically computed
- ('uint16', 'MappingCount', None, None, ''), # Automatically computed
- ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
- ]),
-
- ('DeltaSetIndexMapFormat1', [
- ('uint8', 'Format', None, None, 'Format of the DeltaSetIndexMap = 1'),
- ('uint8', 'EntryFormat', None, None, ''), # Automatically computed
- ('uint32', 'MappingCount', None, None, ''), # Automatically computed
- ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
- ]),
-
- # Glyph advance variations
-
- ('HVAR', [
- ('Version', 'Version', None, None, 'Version of the HVAR table-initially = 0x00010000'),
- ('LOffset', 'VarStore', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'AdvWidthMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'LsbMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'RsbMap', None, None, ''),
- ]),
- ('VVAR', [
- ('Version', 'Version', None, None, 'Version of the VVAR table-initially = 0x00010000'),
- ('LOffset', 'VarStore', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'AdvHeightMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'TsbMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'BsbMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'VOrgMap', None, None, 'Vertical origin mapping.'),
- ]),
-
- # Font-wide metrics variations
-
- ('MetricsValueRecord', [
- ('Tag', 'ValueTag', None, None, '4-byte font-wide measure identifier'),
- ('uint32', 'VarIdx', None, None, 'Combined outer-inner variation index'),
- ('uint8', 'MoreBytes', 'ValueRecordSize', -8, 'Extra bytes. Set to empty array.'),
- ]),
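The VarIdx field packs the two-level VariationStore index into one uint32: the high 16 bits select the VarData subtable (outer index) and the low 16 bits select the row within it (inner index). A one-line sketch of the split:

def split_var_idx(var_idx):
    # Outer index = which VarData table, inner index = which delta-set row.
    return var_idx >> 16, var_idx & 0xFFFF

assert split_var_idx(0x0002000A) == (2, 10)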
-
- ('MVAR', [
- ('Version', 'Version', None, None, 'Version of the MVAR table-initially = 0x00010000'),
- ('uint16', 'Reserved', None, None, 'Set to 0'),
- ('uint16', 'ValueRecordSize', None, None, ''),
- ('uint16', 'ValueRecordCount', None, None, ''),
- ('Offset', 'VarStore', None, None, ''),
- ('MetricsValueRecord', 'ValueRecord', 'ValueRecordCount', 0, ''),
- ]),
-
-
- #
- # math
- #
-
- ('MATH', [
- ('Version', 'Version', None, None, 'Version of the MATH table-initially set to 0x00010000.'),
- ('Offset', 'MathConstants', None, None, 'Offset to MathConstants table - from the beginning of MATH table.'),
- ('Offset', 'MathGlyphInfo', None, None, 'Offset to MathGlyphInfo table - from the beginning of MATH table.'),
- ('Offset', 'MathVariants', None, None, 'Offset to MathVariants table - from the beginning of MATH table.'),
- ]),
-
- ('MathValueRecord', [
- ('int16', 'Value', None, None, 'The X or Y value in design units.'),
- ('Offset', 'DeviceTable', None, None, 'Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.'),
- ]),
-
- ('MathConstants', [
- ('int16', 'ScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 1. Suggested value: 80%.'),
- ('int16', 'ScriptScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.'),
- ('uint16', 'DelimitedSubFormulaMinHeight', None, None, 'Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.'),
- ('uint16', 'DisplayOperatorMinHeight', None, None, 'Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.'),
- ('MathValueRecord', 'MathLeading', None, None, 'White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.'),
- ('MathValueRecord', 'AxisHeight', None, None, 'Axis height of the font.'),
- ('MathValueRecord', 'AccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.'),
- ('MathValueRecord', 'FlattenedAccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).'),
- ('MathValueRecord', 'SubscriptShiftDown', None, None, 'The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.'),
- ('MathValueRecord', 'SubscriptTopMax', None, None, 'Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.'),
- ('MathValueRecord', 'SubscriptBaselineDropMin', None, None, 'Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.'),
- ('MathValueRecord', 'SuperscriptShiftUp', None, None, 'Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.'),
- ('MathValueRecord', 'SuperscriptShiftUpCramped', None, None, 'Standard shift of superscripts relative to the base, in cramped style.'),
- ('MathValueRecord', 'SuperscriptBottomMin', None, None, 'Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.'),
- ('MathValueRecord', 'SuperscriptBaselineDropMax', None, None, 'Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.'),
- ('MathValueRecord', 'SubSuperscriptGapMin', None, None, 'Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.'),
- ('MathValueRecord', 'SuperscriptBottomMaxWithSubscript', None, None, 'The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.'),
- ('MathValueRecord', 'SpaceAfterScript', None, None, 'Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.'),
- ('MathValueRecord', 'UpperLimitGapMin', None, None, 'Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.'),
- ('MathValueRecord', 'UpperLimitBaselineRiseMin', None, None, 'Minimum distance between baseline of upper limit and (ink) top of the base operator.'),
- ('MathValueRecord', 'LowerLimitGapMin', None, None, 'Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.'),
- ('MathValueRecord', 'LowerLimitBaselineDropMin', None, None, 'Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.'),
- ('MathValueRecord', 'StackTopShiftUp', None, None, 'Standard shift up applied to the top element of a stack.'),
- ('MathValueRecord', 'StackTopDisplayStyleShiftUp', None, None, 'Standard shift up applied to the top element of a stack in display style.'),
- ('MathValueRecord', 'StackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'StackBottomDisplayStyleShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'StackGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'StackDisplayStyleGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.'),
- ('MathValueRecord', 'StretchStackTopShiftUp', None, None, 'Standard shift up applied to the top element of the stretch stack.'),
- ('MathValueRecord', 'StretchStackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'StretchStackGapAboveMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin'),
- ('MathValueRecord', 'StretchStackGapBelowMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.'),
- ('MathValueRecord', 'FractionNumeratorShiftUp', None, None, 'Standard shift up applied to the numerator.'),
- ('MathValueRecord', 'FractionNumeratorDisplayStyleShiftUp', None, None, 'Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.'),
- ('MathValueRecord', 'FractionDenominatorShiftDown', None, None, 'Standard shift down applied to the denominator. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'FractionDenominatorDisplayStyleShiftDown', None, None, 'Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.'),
- ('MathValueRecord', 'FractionNumeratorGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness'),
- ('MathValueRecord', 'FractionNumDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'FractionRuleThickness', None, None, 'Thickness of the fraction bar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'FractionDenominatorGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness'),
- ('MathValueRecord', 'FractionDenomDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'SkewedFractionHorizontalGap', None, None, 'Horizontal distance between the top and bottom elements of a skewed fraction.'),
- ('MathValueRecord', 'SkewedFractionVerticalGap', None, None, 'Vertical distance between the ink of the top and bottom elements of a skewed fraction.'),
- ('MathValueRecord', 'OverbarVerticalGap', None, None, 'Distance between the overbar and the (ink) top of the base. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'OverbarRuleThickness', None, None, 'Thickness of overbar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'OverbarExtraAscender', None, None, 'Extra white space reserved above the overbar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'UnderbarVerticalGap', None, None, 'Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'UnderbarRuleThickness', None, None, 'Thickness of underbar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'UnderbarExtraDescender', None, None, 'Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.'),
- ('MathValueRecord', 'RadicalVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.'),
- ('MathValueRecord', 'RadicalDisplayStyleVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.'),
- ('MathValueRecord', 'RadicalRuleThickness', None, None, 'Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.'),
- ('MathValueRecord', 'RadicalExtraAscender', None, None, 'Extra white space reserved above the radical. Suggested: RadicalRuleThickness.'),
- ('MathValueRecord', 'RadicalKernBeforeDegree', None, None, 'Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.'),
- ('MathValueRecord', 'RadicalKernAfterDegree', None, None, 'Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.'),
- ('uint16', 'RadicalDegreeBottomRaisePercent', None, None, 'Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.'),
- ]),
-
- ('MathGlyphInfo', [
- ('Offset', 'MathItalicsCorrectionInfo', None, None, 'Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.'),
- ('Offset', 'MathTopAccentAttachment', None, None, 'Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.'),
- ('Offset', 'ExtendedShapeCoverage', None, None, 'Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.'),
- ('Offset', 'MathKernInfo', None, None, 'Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.'),
- ]),
-
- ('MathItalicsCorrectionInfo', [
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.'),
- ('uint16', 'ItalicsCorrectionCount', None, None, 'Number of italics correction values. Should coincide with the number of covered glyphs.'),
- ('MathValueRecord', 'ItalicsCorrection', 'ItalicsCorrectionCount', 0, 'Array of MathValueRecords defining italics correction values for each covered glyph.'),
- ]),
-
- ('MathTopAccentAttachment', [
- ('Offset', 'TopAccentCoverage', None, None, 'Offset to Coverage table - from the beginning of MathTopAccentAttachment table.'),
- ('uint16', 'TopAccentAttachmentCount', None, None, 'Number of top accent attachment point values. Should coincide with the number of covered glyphs'),
- ('MathValueRecord', 'TopAccentAttachment', 'TopAccentAttachmentCount', 0, 'Array of MathValueRecords defining top accent attachment points for each covered glyph'),
- ]),
-
- ('MathKernInfo', [
- ('Offset', 'MathKernCoverage', None, None, 'Offset to Coverage table - from the beginning of the MathKernInfo table.'),
- ('uint16', 'MathKernCount', None, None, 'Number of MathKernInfoRecords.'),
- ('MathKernInfoRecord', 'MathKernInfoRecords', 'MathKernCount', 0, 'Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.'),
- ]),
-
- ('MathKernInfoRecord', [
- ('Offset', 'TopRightMathKern', None, None, 'Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.'),
- ('Offset', 'TopLeftMathKern', None, None, 'Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.'),
- ('Offset', 'BottomRightMathKern', None, None, 'Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.'),
- ('Offset', 'BottomLeftMathKern', None, None, 'Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.'),
- ]),
-
- ('MathKern', [
- ('uint16', 'HeightCount', None, None, 'Number of heights on which the kern value changes.'),
- ('MathValueRecord', 'CorrectionHeight', 'HeightCount', 0, 'Array of correction heights at which the kern value changes. Sorted by the height value in design units.'),
- ('MathValueRecord', 'KernValue', 'HeightCount', 1, 'Array of kern values corresponding to heights. First value is the kern value for all heights less than or equal to the first height in this table. Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as moving the glyphs closer to each other.'),
- ]),
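
The MathKern record above is a step function: CorrectionHeight holds HeightCount entries and KernValue holds HeightCount + 1 (the trailing 1 in the tuple is the count adjustment), and a given correction height selects the kern value of the bracket it falls into. A minimal lookup sketch, assuming a decompiled MathKern object whose MathValueRecords expose their design-unit value as .Value:

    def math_kern_value(kern, correction_height):
        # KernValue[i] applies to heights <= CorrectionHeight[i]; the extra,
        # last KernValue applies to everything above the last height.
        for height, value in zip(kern.CorrectionHeight, kern.KernValue):
            if correction_height <= height.Value:
                return value.Value
        return kern.KernValue[-1].Value
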
-
- ('MathVariants', [
- ('uint16', 'MinConnectorOverlap', None, None, 'Minimum overlap of connecting glyphs during glyph construction, in design units.'),
- ('Offset', 'VertGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'),
- ('Offset', 'HorizGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'),
- ('uint16', 'VertGlyphCount', None, None, 'Number of glyphs for which information is provided for vertically growing variants.'),
- ('uint16', 'HorizGlyphCount', None, None, 'Number of glyphs for which information is provided for horizontally growing variants.'),
- ('Offset', 'VertGlyphConstruction', 'VertGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.'),
- ('Offset', 'HorizGlyphConstruction', 'HorizGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.'),
- ]),
-
- ('MathGlyphConstruction', [
- ('Offset', 'GlyphAssembly', None, None, 'Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL'),
- ('uint16', 'VariantCount', None, None, 'Count of glyph growing variants for this glyph.'),
- ('MathGlyphVariantRecord', 'MathGlyphVariantRecord', 'VariantCount', 0, 'MathGlyphVariantRecords for alternative variants of the glyphs.'),
- ]),
-
- ('MathGlyphVariantRecord', [
- ('GlyphID', 'VariantGlyph', None, None, 'Glyph ID for the variant.'),
- ('uint16', 'AdvanceMeasurement', None, None, 'Advance width/height, in design units, of the variant, in the direction of requested glyph extension.'),
- ]),
-
- ('GlyphAssembly', [
- ('MathValueRecord', 'ItalicsCorrection', None, None, 'Italics correction of this GlyphAssembly. Should not depend on the assembly size.'),
- ('uint16', 'PartCount', None, None, 'Number of parts in this assembly.'),
- ('GlyphPartRecord', 'PartRecords', 'PartCount', 0, 'Array of part records, from left to right and bottom to top.'),
- ]),
-
- ('GlyphPartRecord', [
- ('GlyphID', 'glyph', None, None, 'Glyph ID for the part.'),
- ('uint16', 'StartConnectorLength', None, None, 'Advance width/height, in design units, of the straight bar connector material at the beginning of the glyph, in the direction of the extension.'),
- ('uint16', 'EndConnectorLength', None, None, 'Advance width/height, in design units, of the straight bar connector material at the end of the glyph, in the direction of the extension.'),
- ('uint16', 'FullAdvance', None, None, 'Full advance width/height for this part, in the direction of the extension. In design units.'),
- ('uint16', 'PartFlags', None, None, 'Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 0xFFFE Reserved'),
- ]),
-
-
- ##
- ## Apple Advanced Typography (AAT) tables
- ##
-
- ('AATLookupSegment', [
- ('uint16', 'lastGlyph', None, None, 'Last glyph index in this segment.'),
- ('uint16', 'firstGlyph', None, None, 'First glyph index in this segment.'),
- ('uint16', 'value', None, None, 'A 16-bit offset from the start of the table to the data.'),
- ]),
-
-
- #
- # ankr
- #
-
- ('ankr', [
- ('struct', 'AnchorPoints', None, None, 'Anchor points table.'),
- ]),
-
- ('AnchorPointsFormat0', [
- ('uint16', 'Format', None, None, 'Format of the anchor points table, = 0.'),
- ('uint16', 'Flags', None, None, 'Flags. Currently unused, set to zero.'),
- ('AATLookupWithDataOffset(AnchorGlyphData)', 'Anchors', None, None, 'Table with anchor overrides for each glyph.'),
- ]),
-
- ('AnchorGlyphData', [
- ('uint32', 'AnchorPointCount', None, None, 'Number of anchor points for this glyph.'),
- ('struct', 'AnchorPoint', 'AnchorPointCount', 0, 'Individual anchor points.'),
- ]),
-
- ('AnchorPoint', [
- ('int16', 'XCoordinate', None, None, 'X coordinate of this anchor point.'),
- ('int16', 'YCoordinate', None, None, 'Y coordinate of this anchor point.'),
- ]),
-
- #
- # bsln
- #
-
- ('bsln', [
- ('Version', 'Version', None, None, 'Version number of the AAT baseline table (0x00010000 for the initial version).'),
- ('struct', 'Baseline', None, None, 'Baseline table.'),
- ]),
-
- ('BaselineFormat0', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 0.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'),
- ]),
-
- ('BaselineFormat1', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'),
- ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'),
- ]),
-
- ('BaselineFormat2', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 2.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'),
- ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'),
- ]),
-
- ('BaselineFormat3', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 3.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'),
- ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'),
- ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'),
- ]),
-
-
- #
- # cidg
- #
-
- ('cidg', [
- ('struct', 'CIDGlyphMapping', None, None, 'CID-to-glyph mapping table.'),
- ]),
-
- ('CIDGlyphMappingFormat0', [
- ('uint16', 'Format', None, None, 'Format of the CID-to-glyph mapping table, = 0.'),
- ('uint16', 'DataFormat', None, None, 'Currently unused, set to zero.'),
- ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'),
- ('uint16', 'Registry', None, None, 'The registry ID.'),
- ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'Order', None, None, 'The order ID.'),
- ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'SupplementVersion', None, None, 'The supplement version.'),
- ('CIDGlyphMap', 'Mapping', None, None, 'A mapping from CIDs to the glyphs in the font, starting with CID 0. If a CID from the identified collection has no glyph in the font, 0xFFFF is used'),
- ]),
-
-
- #
- # feat
- #
-
- ('feat', [
- ('Version', 'Version', None, None, 'Version of the feat table-initially set to 0x00010000.'),
- ('FeatureNames', 'FeatureNames', None, None, 'The feature names.'),
- ]),
-
- ('FeatureNames', [
- ('uint16', 'FeatureNameCount', None, None, 'Number of entries in the feature name array.'),
- ('uint16', 'Reserved1', None, None, 'Reserved (set to zero).'),
- ('uint32', 'Reserved2', None, None, 'Reserved (set to zero).'),
- ('FeatureName', 'FeatureName', 'FeatureNameCount', 0, 'The feature name array.'),
- ]),
-
- ('FeatureName', [
- ('uint16', 'FeatureType', None, None, 'Feature type.'),
- ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'),
- ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'),
- ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'),
- ('NameID', 'FeatureNameID', None, None, 'The name table index for the feature name.'),
- ]),
-
- ('Settings', [
- ('Setting', 'Setting', 'SettingsCount', 0, 'The setting array.'),
- ]),
-
- ('Setting', [
- ('uint16', 'SettingValue', None, None, 'The setting.'),
- ('NameID', 'SettingNameID', None, None, 'The name table index for the setting name.'),
- ]),
-
-
- #
- # gcid
- #
-
- ('gcid', [
- ('struct', 'GlyphCIDMapping', None, None, 'Glyph to CID mapping table.'),
- ]),
-
- ('GlyphCIDMappingFormat0', [
- ('uint16', 'Format', None, None, 'Format of the glyph-to-CID mapping table, = 0.'),
- ('uint16', 'DataFormat', None, None, 'Currently unused, set to zero.'),
- ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'),
- ('uint16', 'Registry', None, None, 'The registry ID.'),
- ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'Order', None, None, 'The order ID.'),
- ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'SupplementVersion', None, None, 'The supplement version.'),
- ('GlyphCIDMap', 'Mapping', None, None, 'The CIDs for the glyphs in the font, starting with glyph 0. If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used'),
- ]),
-
-
- #
- # lcar
- #
-
- ('lcar', [
- ('Version', 'Version', None, None, 'Version number of the ligature caret table (0x00010000 for the initial version).'),
- ('struct', 'LigatureCarets', None, None, 'Ligature carets table.'),
- ]),
-
- ('LigatureCaretsFormat0', [
- ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'),
- ('AATLookup(LigCaretDistances)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, in font unit distances.'),
- ]),
-
- ('LigatureCaretsFormat1', [
- ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'),
- ('AATLookup(LigCaretPoints)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, as control points.'),
- ]),
-
- ('LigCaretDistances', [
- ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'),
- ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'Distance in font units through which a subdivision is made orthogonally to the baseline.'),
- ]),
-
- ('LigCaretPoints', [
- ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'),
- ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'The number of the control point through which a subdivision is made orthogonally to the baseline.'),
- ]),
-
-
- #
- # mort
- #
-
- ('mort', [
- ('Version', 'Version', None, None, 'Version of the mort table.'),
- ('uint32', 'MorphChainCount', None, None, 'Number of metamorphosis chains.'),
- ('MortChain', 'MorphChain', 'MorphChainCount', 0, 'Array of metamorphosis chains.'),
- ]),
-
- ('MortChain', [
- ('Flags32', 'DefaultFlags', None, None, 'The default specification for subtables.'),
- ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'),
- ('uint16', 'MorphFeatureCount', None, None, 'Number of metamorphosis feature entries.'),
- ('uint16', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'),
- ('struct', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'),
- ('MortSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of metamorphosis subtables.'),
- ]),
-
- ('MortSubtable', [
- ('uint16', 'StructLength', None, None, 'Total subtable length, including this header.'),
- ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'),
- ('uint8', 'MorphType', None, None, 'Subtable type.'),
- ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'),
- ('SubStruct', 'SubStruct', None, None, 'SubTable.'),
- ]),
-
- #
- # morx
- #
-
- ('morx', [
- ('uint16', 'Version', None, None, 'Version of the morx table.'),
- ('uint16', 'Reserved', None, None, 'Reserved (set to zero).'),
- ('uint32', 'MorphChainCount', None, None, 'Number of extended metamorphosis chains.'),
- ('MorxChain', 'MorphChain', 'MorphChainCount', 0, 'Array of extended metamorphosis chains.'),
- ]),
-
- ('MorxChain', [
- ('Flags32', 'DefaultFlags', None, None, 'The default specification for subtables.'),
- ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'),
- ('uint32', 'MorphFeatureCount', None, None, 'Number of feature subtable entries.'),
- ('uint32', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'),
- ('MorphFeature', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'),
- ('MorxSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of extended metamorphosis subtables.'),
- ]),
-
- ('MorphFeature', [
- ('uint16', 'FeatureType', None, None, 'The type of feature.'),
- ('uint16', 'FeatureSetting', None, None, "The feature's setting (aka selector)."),
- ('Flags32', 'EnableFlags', None, None, 'Flags for the settings that this feature and setting enables.'),
- ('Flags32', 'DisableFlags', None, None, 'Complement of flags for the settings that this feature and setting disable.'),
- ]),
-
- # Apple TrueType Reference Manual, chapter “The ‘morx’ table”,
- # section “Metamorphosis Subtables”.
- # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
- ('MorxSubtable', [
- ('uint32', 'StructLength', None, None, 'Total subtable length, including this header.'),
- ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'),
- ('uint16', 'Reserved', None, None, 'Unused.'),
- ('uint8', 'MorphType', None, None, 'Subtable type.'),
- ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'),
- ('SubStruct', 'SubStruct', None, None, 'SubTable.'),
- ]),
-
- ('StateHeader', [
- ('uint32', 'ClassCount', None, None, 'Number of classes, which is the number of 16-bit entry indices in a single line in the state array.'),
- ('uint32', 'MorphClass', None, None, 'Offset from the start of this state table header to the start of the class table.'),
- ('uint32', 'StateArrayOffset', None, None, 'Offset from the start of this state table header to the start of the state array.'),
- ('uint32', 'EntryTableOffset', None, None, 'Offset from the start of this state table header to the start of the entry table.'),
- ]),
-
- ('RearrangementMorph', [
- ('STXHeader(RearrangementMorphAction)', 'StateTable', None, None, 'Finite-state transducer table for indic rearrangement.'),
- ]),
-
- ('ContextualMorph', [
- ('STXHeader(ContextualMorphAction)', 'StateTable', None, None, 'Finite-state transducer for contextual glyph substitution.'),
- ]),
-
- ('LigatureMorph', [
- ('STXHeader(LigatureMorphAction)', 'StateTable', None, None, 'Finite-state transducer for ligature substitution.'),
- ]),
-
- ('NoncontextualMorph', [
- ('AATLookup(GlyphID)', 'Substitution', None, None, 'The noncontextual glyph substitution table.'),
- ]),
-
- ('InsertionMorph', [
- ('STXHeader(InsertionMorphAction)', 'StateTable', None, None, 'Finite-state transducer for glyph insertion.'),
- ]),
-
- ('MorphClass', [
- ('uint16', 'FirstGlyph', None, None, 'Glyph index of the first glyph in the class table.'),
- #('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'),
- #('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'),
- ]),
-
- # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below.
- # ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
-
-
- #
- # prop
- #
-
- ('prop', [
- ('Fixed', 'Version', None, None, 'Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, recognized by Mac OS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, recognized by Mac OS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.'),
- ('struct', 'GlyphProperties', None, None, 'Glyph properties.'),
- ]),
-
- ('GlyphPropertiesFormat0', [
- ('uint16', 'Format', None, None, 'Format, = 0.'),
- ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph. Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.'),
- ]),
-
- ('GlyphPropertiesFormat1', [
- ('uint16', 'Format', None, None, 'Format, = 1.'),
- ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph if that glyph is not present in the Properties lookup table.'),
- ('AATLookup(uint16)', 'Properties', None, None, 'Lookup data associating glyphs with their properties.'),
- ]),
-
-
- #
- # opbd
- #
-
- ('opbd', [
- ('Version', 'Version', None, None, 'Version number of the optical bounds table (0x00010000 for the initial version).'),
- ('struct', 'OpticalBounds', None, None, 'Optical bounds table.'),
- ]),
-
- ('OpticalBoundsFormat0', [
- ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 0.'),
- ('AATLookup(OpticalBoundsDeltas)', 'OpticalBoundsDeltas', None, None, 'Lookup table associating glyphs with their optical bounds, given as deltas in font units.'),
- ]),
-
- ('OpticalBoundsFormat1', [
- ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 1.'),
- ('AATLookup(OpticalBoundsPoints)', 'OpticalBoundsPoints', None, None, 'Lookup table associating glyphs with their optical bounds, given as references to control points.'),
- ]),
-
- ('OpticalBoundsDeltas', [
- ('int16', 'Left', None, None, 'Delta value for the left-side optical edge.'),
- ('int16', 'Top', None, None, 'Delta value for the top-side optical edge.'),
- ('int16', 'Right', None, None, 'Delta value for the right-side optical edge.'),
- ('int16', 'Bottom', None, None, 'Delta value for the bottom-side optical edge.'),
- ]),
-
- ('OpticalBoundsPoints', [
- ('int16', 'Left', None, None, 'Control point index for the left-side optical edge, or -1 if this glyph has none.'),
- ('int16', 'Top', None, None, 'Control point index for the top-side optical edge, or -1 if this glyph has none.'),
- ('int16', 'Right', None, None, 'Control point index for the right-side optical edge, or -1 if this glyph has none.'),
- ('int16', 'Bottom', None, None, 'Control point index for the bottom-side optical edge, or -1 if this glyph has none.'),
- ]),
-
- #
- # TSIC
- #
- ('TSIC', [
- ('Version', 'Version', None, None, 'Version of table initially set to 0x00010000.'),
- ('uint16', 'Flags', None, None, 'TSIC flags - set to 0'),
- ('uint16', 'AxisCount', None, None, 'Axis count from fvar'),
- ('uint16', 'RecordCount', None, None, 'TSIC record count'),
- ('uint16', 'Reserved', None, None, 'Set to 0'),
- ('Tag', 'AxisArray', 'AxisCount', 0, 'Array of axis tags in fvar order'),
- ('LocationRecord', 'RecordLocations', 'RecordCount', 0, 'Location in variation space of TSIC record'),
- ('TSICRecord', 'Record', 'RecordCount', 0, 'Array of TSIC records'),
- ]),
-
- ('LocationRecord', [
- ('F2Dot14', 'Axis', 'AxisCount', 0, 'Axis record'),
- ]),
-
- ('TSICRecord', [
- ('uint16', 'Flags', None, None, 'Record flags - set to 0'),
- ('uint16', 'NumCVTEntries', None, None, 'Number of CVT number value pairs'),
- ('uint16', 'NameLength', None, None, 'Length of optional user record name'),
- ('uint16', 'NameArray', 'NameLength', 0, 'Unicode 16 name'),
- ('uint16', 'CVTArray', 'NumCVTEntries', 0, 'CVT number array'),
- ('int16', 'CVTValueArray', 'NumCVTEntries', 0, 'CVT value'),
- ]),
-
- #
- # COLR
- #
-
- ('COLR', [
- ('uint16', 'Version', None, None, 'Table version number (starts at 0).'),
- ('uint16', 'BaseGlyphRecordCount', None, None, 'Number of Base Glyph Records.'),
- ('LOffset', 'BaseGlyphRecordArray', None, None, 'Offset (from beginning of COLR table) to Base Glyph records.'),
- ('LOffset', 'LayerRecordArray', None, None, 'Offset (from beginning of COLR table) to Layer Records.'),
- ('uint16', 'LayerRecordCount', None, None, 'Number of Layer Records.'),
- ('LOffset', 'BaseGlyphList', None, 'Version >= 1', 'Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.'),
- ('LOffset', 'LayerList', None, 'Version >= 1', 'Offset (from beginning of COLR table) to LayerList.'),
- ('LOffset', 'ClipList', None, 'Version >= 1', 'Offset to ClipList table (may be NULL)'),
- ('LOffsetTo(DeltaSetIndexMap)', 'VarIndexMap', None, 'Version >= 1', 'Offset to DeltaSetIndexMap table (may be NULL)'),
- ('LOffset', 'VarStore', None, 'Version >= 1', 'Offset to variation store (may be NULL)'),
- ]),
-
- ('BaseGlyphRecordArray', [
- ('BaseGlyphRecord', 'BaseGlyphRecord', 'BaseGlyphRecordCount', 0, 'Base Glyph records.'),
- ]),
-
- ('BaseGlyphRecord', [
- ('GlyphID', 'BaseGlyph', None, None, 'Glyph ID of reference glyph. This glyph is for reference only and is not rendered for color.'),
- ('uint16', 'FirstLayerIndex', None, None, 'Index (from beginning of the Layer Records) to the layer record. There will be numLayers consecutive entries for this base glyph.'),
- ('uint16', 'NumLayers', None, None, 'Number of color layers associated with this glyph.'),
- ]),
-
- ('LayerRecordArray', [
- ('LayerRecord', 'LayerRecord', 'LayerRecordCount', 0, 'Layer records.'),
- ]),
-
- ('LayerRecord', [
- ('GlyphID', 'LayerGlyph', None, None, 'Glyph ID of layer glyph (must be in z-order from bottom to top).'),
- ('uint16', 'PaletteIndex', None, None, 'Index value to use with a selected color palette.'),
- ]),
-
- ('BaseGlyphList', [
- ('uint32', 'BaseGlyphCount', None, None, 'Number of Version-1 Base Glyph records'),
- ('struct', 'BaseGlyphPaintRecord', 'BaseGlyphCount', 0, 'Array of Version-1 Base Glyph records'),
- ]),
-
- ('BaseGlyphPaintRecord', [
- ('GlyphID', 'BaseGlyph', None, None, 'Glyph ID of reference glyph.'),
- ('LOffset', 'Paint', None, None, 'Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.'),
- ]),
-
- ('LayerList', [
- ('uint32', 'LayerCount', None, None, 'Number of Version-1 Layers'),
- ('LOffset', 'Paint', 'LayerCount', 0, 'Array of offsets to Paint tables, from the start of the LayerList table.'),
- ]),
-
- ('ClipListFormat1', [
- ('uint8', 'Format', None, None, 'Format for ClipList with 16-bit glyph IDs: 1'),
- ('uint32', 'ClipCount', None, None, 'Number of Clip records.'),
- ('struct', 'ClipRecord', 'ClipCount', 0, 'Array of Clip records sorted by glyph ID.'),
- ]),
-
- ('ClipRecord', [
- ('uint16', 'StartGlyphID', None, None, 'First glyph ID in the range.'),
- ('uint16', 'EndGlyphID', None, None, 'Last glyph ID in the range.'),
- ('Offset24', 'ClipBox', None, None, 'Offset to a ClipBox table.'),
- ]),
-
- ('ClipBoxFormat1', [
- ('uint8', 'Format', None, None, 'Format for ClipBox without variation: set to 1.'),
- ('int16', 'xMin', None, None, 'Minimum x of clip box.'),
- ('int16', 'yMin', None, None, 'Minimum y of clip box.'),
- ('int16', 'xMax', None, None, 'Maximum x of clip box.'),
- ('int16', 'yMax', None, None, 'Maximum y of clip box.'),
- ]),
-
- ('ClipBoxFormat2', [
- ('uint8', 'Format', None, None, 'Format for variable ClipBox: set to 2.'),
- ('int16', 'xMin', None, None, 'Minimum x of clip box. VarIndexBase + 0.'),
- ('int16', 'yMin', None, None, 'Minimum y of clip box. VarIndexBase + 1.'),
- ('int16', 'xMax', None, None, 'Maximum x of clip box. VarIndexBase + 2.'),
- ('int16', 'yMax', None, None, 'Maximum y of clip box. VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # COLRv1 Affine2x3 uses the same column-major order to serialize a 2D
- # Affine Transformation as the one used by fontTools.misc.transform.
- # However, for historical reasons, the labels 'xy' and 'yx' are swapped.
- # Their fundamental meaning is the same though.
- # COLRv1 Affine2x3 follows the names found in FreeType and Cairo.
- # In all case, the second element in the 6-tuple correspond to the
- # y-part of the x basis vector, and the third to the x-part of the y
- # basis vector.
- # See https://github.com/googlefonts/colr-gradients-spec/pull/85
- ('Affine2x3', [
- ('Fixed', 'xx', None, None, 'x-part of x basis vector'),
- ('Fixed', 'yx', None, None, 'y-part of x basis vector'),
- ('Fixed', 'xy', None, None, 'x-part of y basis vector'),
- ('Fixed', 'yy', None, None, 'y-part of y basis vector'),
- ('Fixed', 'dx', None, None, 'Translation in x direction'),
- ('Fixed', 'dy', None, None, 'Translation in y direction'),
- ]),
- ('VarAffine2x3', [
- ('Fixed', 'xx', None, None, 'x-part of x basis vector. VarIndexBase + 0.'),
- ('Fixed', 'yx', None, None, 'y-part of x basis vector. VarIndexBase + 1.'),
- ('Fixed', 'xy', None, None, 'x-part of y basis vector. VarIndexBase + 2.'),
- ('Fixed', 'yy', None, None, 'y-part of y basis vector. VarIndexBase + 3.'),
- ('Fixed', 'dx', None, None, 'Translation in x direction. VarIndexBase + 4.'),
- ('Fixed', 'dy', None, None, 'Translation in y direction. VarIndexBase + 5.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
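
Given the ordering noted in the comment above, a decompiled Affine2x3 can be handed positionally to fontTools.misc.transform.Transform; only the 'xy'/'yx' labels differ, not the element order. A minimal sketch (the attribute names match the field names in the record above):

    from fontTools.misc.transform import Transform

    def to_transform(affine):
        # Same column-major order; COLRv1's 'yx' is what Transform calls 'xy'
        # and vice versa, so a positional hand-off is all that is needed.
        return Transform(affine.xx, affine.yx, affine.xy, affine.yy,
                         affine.dx, affine.dy)
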
-
- ('ColorStop', [
- ('F2Dot14', 'StopOffset', None, None, ''),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved'),
- ]),
- ('VarColorStop', [
- ('F2Dot14', 'StopOffset', None, None, 'VarIndexBase + 0.'),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved. VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- ('ColorLine', [
- ('ExtendMode', 'Extend', None, None, 'Enum {PAD = 0, REPEAT = 1, REFLECT = 2}'),
- ('uint16', 'StopCount', None, None, 'Number of Color stops.'),
- ('ColorStop', 'ColorStop', 'StopCount', 0, 'Array of Color stops.'),
- ]),
- ('VarColorLine', [
- ('ExtendMode', 'Extend', None, None, 'Enum {PAD = 0, REPEAT = 1, REFLECT = 2}'),
- ('uint16', 'StopCount', None, None, 'Number of Color stops.'),
- ('VarColorStop', 'ColorStop', 'StopCount', 0, 'Array of Color stops.'),
- ]),
-
- # PaintColrLayers
- ('PaintFormat1', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 1'),
- ('uint8', 'NumLayers', None, None, 'Number of offsets to Paint to read from LayerList.'),
- ('uint32', 'FirstLayerIndex', None, None, 'Index into LayerList.'),
- ]),
-
- # PaintSolid
- ('PaintFormat2', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved'),
- ]),
- # PaintVarSolid
- ('PaintFormat3', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved. VarIndexBase + 0.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintLinearGradient
- ('PaintFormat4', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 4'),
- ('Offset24', 'ColorLine', None, None, 'Offset (from beginning of PaintLinearGradient table) to ColorLine subtable.'),
- ('int16', 'x0', None, None, ''),
- ('int16', 'y0', None, None, ''),
- ('int16', 'x1', None, None, ''),
- ('int16', 'y1', None, None, ''),
- ('int16', 'x2', None, None, ''),
- ('int16', 'y2', None, None, ''),
- ]),
- # PaintVarLinearGradient
- ('PaintFormat5', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 5'),
- ('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.'),
- ('int16', 'x0', None, None, 'VarIndexBase + 0.'),
- ('int16', 'y0', None, None, 'VarIndexBase + 1.'),
- ('int16', 'x1', None, None, 'VarIndexBase + 2.'),
- ('int16', 'y1', None, None, 'VarIndexBase + 3.'),
- ('int16', 'x2', None, None, 'VarIndexBase + 4.'),
- ('int16', 'y2', None, None, 'VarIndexBase + 5.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintRadialGradient
- ('PaintFormat6', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 6'),
- ('Offset24', 'ColorLine', None, None, 'Offset (from beginning of PaintRadialGradient table) to ColorLine subtable.'),
- ('int16', 'x0', None, None, ''),
- ('int16', 'y0', None, None, ''),
- ('uint16', 'r0', None, None, ''),
- ('int16', 'x1', None, None, ''),
- ('int16', 'y1', None, None, ''),
- ('uint16', 'r1', None, None, ''),
- ]),
- # PaintVarRadialGradient
- ('PaintFormat7', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 7'),
- ('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.'),
- ('int16', 'x0', None, None, 'VarIndexBase + 0.'),
- ('int16', 'y0', None, None, 'VarIndexBase + 1.'),
- ('uint16', 'r0', None, None, 'VarIndexBase + 2.'),
- ('int16', 'x1', None, None, 'VarIndexBase + 3.'),
- ('int16', 'y1', None, None, 'VarIndexBase + 4.'),
- ('uint16', 'r1', None, None, 'VarIndexBase + 5.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintSweepGradient
- ('PaintFormat8', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 8'),
- ('Offset24', 'ColorLine', None, None, 'Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.'),
- ('int16', 'centerX', None, None, 'Center x coordinate.'),
- ('int16', 'centerY', None, None, 'Center y coordinate.'),
- ('BiasedAngle', 'startAngle', None, None, 'Start of the angular range of the gradient.'),
- ('BiasedAngle', 'endAngle', None, None, 'End of the angular range of the gradient.'),
- ]),
- # PaintVarSweepGradient
- ('PaintFormat9', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 9'),
- ('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.'),
- ('int16', 'centerX', None, None, 'Center x coordinate. VarIndexBase + 0.'),
- ('int16', 'centerY', None, None, 'Center y coordinate. VarIndexBase + 1.'),
- ('BiasedAngle', 'startAngle', None, None, 'Start of the angular range of the gradient. VarIndexBase + 2.'),
- ('BiasedAngle', 'endAngle', None, None, 'End of the angular range of the gradient. VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintGlyph
- ('PaintFormat10', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 10'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintGlyph table) to Paint subtable.'),
- ('GlyphID', 'Glyph', None, None, 'Glyph ID for the source outline.'),
- ]),
-
- # PaintColrGlyph
- ('PaintFormat11', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 11'),
- ('GlyphID', 'Glyph', None, None, 'Virtual glyph ID for a BaseGlyphList base glyph.'),
- ]),
-
- # PaintTransform
- ('PaintFormat12', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 12'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintTransform table) to Paint subtable.'),
- ('LOffset24To(Affine2x3)', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
- ]),
- # PaintVarTransform
- ('PaintFormat13', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 13'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarTransform table) to Paint subtable.'),
- ('LOffset24To(VarAffine2x3)', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
- ]),
-
- # PaintTranslate
- ('PaintFormat14', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 14'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintTranslate table) to Paint subtable.'),
- ('int16', 'dx', None, None, 'Translation in x direction.'),
- ('int16', 'dy', None, None, 'Translation in y direction.'),
- ]),
- # PaintVarTranslate
- ('PaintFormat15', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 15'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarTranslate table) to Paint subtable.'),
- ('int16', 'dx', None, None, 'Translation in x direction. VarIndexBase + 0.'),
- ('int16', 'dy', None, None, 'Translation in y direction. VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScale
- ('PaintFormat16', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 16'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScale table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, ''),
- ('F2Dot14', 'scaleY', None, None, ''),
- ]),
- # PaintVarScale
- ('PaintFormat17', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 17'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScale table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, 'VarIndexBase + 0.'),
- ('F2Dot14', 'scaleY', None, None, 'VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScaleAroundCenter
- ('PaintFormat18', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 18'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, ''),
- ('F2Dot14', 'scaleY', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarScaleAroundCenter
- ('PaintFormat19', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 19'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, 'VarIndexBase + 0.'),
- ('F2Dot14', 'scaleY', None, None, 'VarIndexBase + 1.'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 2.'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScaleUniform
- ('PaintFormat20', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 20'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleUniform table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, ''),
- ]),
- # PaintVarScaleUniform
- ('PaintFormat21', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 21'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, 'VarIndexBase + 0.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScaleUniformAroundCenter
- ('PaintFormat22', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 22'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarScaleUniformAroundCenter
- ('PaintFormat23', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 23'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, 'VarIndexBase + 0'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 1'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 2'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintRotate
- ('PaintFormat24', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 24'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintRotate table) to Paint subtable.'),
- ('Angle', 'angle', None, None, ''),
- ]),
- # PaintVarRotate
- ('PaintFormat25', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 25'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarRotate table) to Paint subtable.'),
- ('Angle', 'angle', None, None, 'VarIndexBase + 0.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintRotateAroundCenter
- ('PaintFormat26', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 26'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.'),
- ('Angle', 'angle', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarRotateAroundCenter
- ('PaintFormat27', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 27'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.'),
- ('Angle', 'angle', None, None, 'VarIndexBase + 0.'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 1.'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 2.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintSkew
- ('PaintFormat28', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 28'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintSkew table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, ''),
- ('Angle', 'ySkewAngle', None, None, ''),
- ]),
- # PaintVarSkew
- ('PaintFormat29', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 29'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarSkew table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, 'VarIndexBase + 0.'),
- ('Angle', 'ySkewAngle', None, None, 'VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintSkewAroundCenter
- ('PaintFormat30', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 30'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, ''),
- ('Angle', 'ySkewAngle', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarSkewAroundCenter
- ('PaintFormat31', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 31'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, 'VarIndexBase + 0.'),
- ('Angle', 'ySkewAngle', None, None, 'VarIndexBase + 1.'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 2.'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintComposite
- ('PaintFormat32', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 32'),
- ('LOffset24To(Paint)', 'SourcePaint', None, None, 'Offset (from beginning of PaintComposite table) to source Paint subtable.'),
- ('CompositeMode', 'CompositeMode', None, None, 'A CompositeMode enumeration value.'),
- ('LOffset24To(Paint)', 'BackdropPaint', None, None, 'Offset (from beginning of PaintComposite table) to backdrop Paint subtable.'),
- ]),
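
To see how the COLR structures above fit together once decompiled: each BaseGlyphPaintRecord in BaseGlyphList points at a root Paint, and a PaintColrLayers (format 1) selects NumLayers consecutive offsets from LayerList starting at FirstLayerIndex. A minimal traversal sketch, assuming a font with a version-1 COLR table at the hypothetical path "MyColorFont.ttf":

    from fontTools.ttLib import TTFont

    font = TTFont("MyColorFont.ttf")  # hypothetical path
    colr = font["COLR"].table
    layer_paints = colr.LayerList.Paint
    for record in colr.BaseGlyphList.BaseGlyphPaintRecord:
        root = record.Paint
        if root.Format == 1:  # PaintColrLayers
            layers = layer_paints[root.FirstLayerIndex:
                                  root.FirstLayerIndex + root.NumLayers]
            print(record.BaseGlyph, [paint.Format for paint in layers])
        else:
            print(record.BaseGlyph, root.Format)
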
+ #
+ # common
+ #
+ ("LookupOrder", []),
+ (
+ "ScriptList",
+ [
+ ("uint16", "ScriptCount", None, None, "Number of ScriptRecords"),
+ (
+ "struct",
+ "ScriptRecord",
+ "ScriptCount",
+ 0,
+ "Array of ScriptRecords -listed alphabetically by ScriptTag",
+ ),
+ ],
+ ),
+ (
+ "ScriptRecord",
+ [
+ ("Tag", "ScriptTag", None, None, "4-byte ScriptTag identifier"),
+ (
+ "Offset",
+ "Script",
+ None,
+ None,
+ "Offset to Script table-from beginning of ScriptList",
+ ),
+ ],
+ ),
+ (
+ "Script",
+ [
+ (
+ "Offset",
+ "DefaultLangSys",
+ None,
+ None,
+ "Offset to DefaultLangSys table-from beginning of Script table-may be NULL",
+ ),
+ (
+ "uint16",
+ "LangSysCount",
+ None,
+ None,
+ "Number of LangSysRecords for this script-excluding the DefaultLangSys",
+ ),
+ (
+ "struct",
+ "LangSysRecord",
+ "LangSysCount",
+ 0,
+ "Array of LangSysRecords-listed alphabetically by LangSysTag",
+ ),
+ ],
+ ),
+ (
+ "LangSysRecord",
+ [
+ ("Tag", "LangSysTag", None, None, "4-byte LangSysTag identifier"),
+ (
+ "Offset",
+ "LangSys",
+ None,
+ None,
+ "Offset to LangSys table-from beginning of Script table",
+ ),
+ ],
+ ),
+ (
+ "LangSys",
+ [
+ (
+ "Offset",
+ "LookupOrder",
+ None,
+ None,
+ "= NULL (reserved for an offset to a reordering table)",
+ ),
+ (
+ "uint16",
+ "ReqFeatureIndex",
+ None,
+ None,
+ "Index of a feature required for this language system- if no required features = 0xFFFF",
+ ),
+ (
+ "uint16",
+ "FeatureCount",
+ None,
+ None,
+ "Number of FeatureIndex values for this language system-excludes the required feature",
+ ),
+ (
+ "uint16",
+ "FeatureIndex",
+ "FeatureCount",
+ 0,
+ "Array of indices into the FeatureList-in arbitrary order",
+ ),
+ ],
+ ),
+ (
+ "FeatureList",
+ [
+ (
+ "uint16",
+ "FeatureCount",
+ None,
+ None,
+ "Number of FeatureRecords in this table",
+ ),
+ (
+ "struct",
+ "FeatureRecord",
+ "FeatureCount",
+ 0,
+ "Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag",
+ ),
+ ],
+ ),
+ (
+ "FeatureRecord",
+ [
+ ("Tag", "FeatureTag", None, None, "4-byte feature identification tag"),
+ (
+ "Offset",
+ "Feature",
+ None,
+ None,
+ "Offset to Feature table-from beginning of FeatureList",
+ ),
+ ],
+ ),
+ (
+ "Feature",
+ [
+ (
+ "Offset",
+ "FeatureParams",
+ None,
+ None,
+ "= NULL (reserved for offset to FeatureParams)",
+ ),
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of LookupList indices for this feature",
+ ),
+ (
+ "uint16",
+ "LookupListIndex",
+ "LookupCount",
+ 0,
+ "Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)",
+ ),
+ ],
+ ),
+ ("FeatureParams", []),
+ (
+ "FeatureParamsSize",
+ [
+ (
+ "DeciPoints",
+ "DesignSize",
+ None,
+ None,
+ "The design size in 720/inch units (decipoints).",
+ ),
+ (
+ "uint16",
+ "SubfamilyID",
+ None,
+ None,
+ "Serves as an identifier that associates fonts in a subfamily.",
+ ),
+ ("NameID", "SubfamilyNameID", None, None, "Subfamily NameID."),
+ (
+ "DeciPoints",
+ "RangeStart",
+ None,
+ None,
+ "Small end of recommended usage range (exclusive) in 720/inch units.",
+ ),
+ (
+ "DeciPoints",
+ "RangeEnd",
+ None,
+ None,
+ "Large end of recommended usage range (inclusive) in 720/inch units.",
+ ),
+ ],
+ ),
+ (
+ "FeatureParamsStylisticSet",
+ [
+ ("uint16", "Version", None, None, "Set to 0."),
+ ("NameID", "UINameID", None, None, "UI NameID."),
+ ],
+ ),
+ (
+ "FeatureParamsCharacterVariants",
+ [
+ ("uint16", "Format", None, None, "Set to 0."),
+ ("NameID", "FeatUILabelNameID", None, None, "Feature UI label NameID."),
+ (
+ "NameID",
+ "FeatUITooltipTextNameID",
+ None,
+ None,
+ "Feature UI tooltip text NameID.",
+ ),
+ ("NameID", "SampleTextNameID", None, None, "Sample text NameID."),
+ ("uint16", "NumNamedParameters", None, None, "Number of named parameters."),
+ (
+ "NameID",
+ "FirstParamUILabelNameID",
+ None,
+ None,
+ "First NameID of UI feature parameters.",
+ ),
+ (
+ "uint16",
+ "CharCount",
+ None,
+ None,
+ "Count of characters this feature provides glyph variants for.",
+ ),
+ (
+ "uint24",
+ "Character",
+ "CharCount",
+ 0,
+ "Unicode characters for which this feature provides glyph variants.",
+ ),
+ ],
+ ),
+ (
+ "LookupList",
+ [
+ ("uint16", "LookupCount", None, None, "Number of lookups in this table"),
+ (
+ "Offset",
+ "Lookup",
+ "LookupCount",
+ 0,
+ "Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)",
+ ),
+ ],
+ ),
+ (
+ "Lookup",
+ [
+ (
+ "uint16",
+ "LookupType",
+ None,
+ None,
+ "Different enumerations for GSUB and GPOS",
+ ),
+ ("LookupFlag", "LookupFlag", None, None, "Lookup qualifiers"),
+ (
+ "uint16",
+ "SubTableCount",
+ None,
+ None,
+ "Number of SubTables for this lookup",
+ ),
+ (
+ "Offset",
+ "SubTable",
+ "SubTableCount",
+ 0,
+ "Array of offsets to SubTables-from beginning of Lookup table",
+ ),
+ (
+ "uint16",
+ "MarkFilteringSet",
+ None,
+ "LookupFlag & 0x0010",
+ "If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.",
+ ),
+ ],
+ ),
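
The ScriptList, FeatureList and LookupList defined above are navigated the same way in GSUB and GPOS: choose a Script and LangSys, collect its FeatureIndex values, map each through FeatureList to LookupListIndex values, and fetch the Lookups. A minimal sketch against a decompiled GPOS table (TTFont and the "MyFont.ttf" path are assumptions for the example):

    from fontTools.ttLib import TTFont

    gpos = TTFont("MyFont.ttf")["GPOS"].table
    for script_record in gpos.ScriptList.ScriptRecord:
        langsys = script_record.Script.DefaultLangSys
        if langsys is None:  # script defines only tagged language systems
            continue
        for feature_index in langsys.FeatureIndex:
            feature_record = gpos.FeatureList.FeatureRecord[feature_index]
            lookups = [gpos.LookupList.Lookup[i]
                       for i in feature_record.Feature.LookupListIndex]
            print(script_record.ScriptTag, feature_record.FeatureTag,
                  [lookup.LookupType for lookup in lookups])
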
+ (
+ "CoverageFormat1",
+ [
+ ("uint16", "CoverageFormat", None, None, "Format identifier-format = 1"),
+ ("uint16", "GlyphCount", None, None, "Number of glyphs in the GlyphArray"),
+ (
+ "GlyphID",
+ "GlyphArray",
+ "GlyphCount",
+ 0,
+ "Array of GlyphIDs-in numerical order",
+ ),
+ ],
+ ),
+ (
+ "CoverageFormat2",
+ [
+ ("uint16", "CoverageFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "RangeCount", None, None, "Number of RangeRecords"),
+ (
+ "struct",
+ "RangeRecord",
+ "RangeCount",
+ 0,
+ "Array of glyph ranges-ordered by Start GlyphID",
+ ),
+ ],
+ ),
+ (
+ "RangeRecord",
+ [
+ ("GlyphID", "Start", None, None, "First GlyphID in the range"),
+ ("GlyphID", "End", None, None, "Last GlyphID in the range"),
+ (
+ "uint16",
+ "StartCoverageIndex",
+ None,
+ None,
+ "Coverage Index of first GlyphID in range",
+ ),
+ ],
+ ),
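
StartCoverageIndex is what keeps format 2 compact: the coverage index of a glyph inside a range is StartCoverageIndex plus the glyph's distance from Start. A small sketch of that computation over raw numeric glyph IDs as stored in the binary table (the decompiled fontTools Coverage object instead exposes a flat glyph list):

    def coverage_index(coverage, glyph_id):
        # CoverageFormat2 lookup; ranges are ordered by Start glyph ID and
        # do not overlap.
        for record in coverage.RangeRecord:
            if record.Start <= glyph_id <= record.End:
                return record.StartCoverageIndex + (glyph_id - record.Start)
        return None  # glyph not covered
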
+ (
+ "ClassDefFormat1",
+ [
+ ("uint16", "ClassFormat", None, None, "Format identifier-format = 1"),
+ (
+ "GlyphID",
+ "StartGlyph",
+ None,
+ None,
+ "First GlyphID of the ClassValueArray",
+ ),
+ ("uint16", "GlyphCount", None, None, "Size of the ClassValueArray"),
+ (
+ "uint16",
+ "ClassValueArray",
+ "GlyphCount",
+ 0,
+ "Array of Class Values-one per GlyphID",
+ ),
+ ],
+ ),
+ (
+ "ClassDefFormat2",
+ [
+ ("uint16", "ClassFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "ClassRangeCount", None, None, "Number of ClassRangeRecords"),
+ (
+ "struct",
+ "ClassRangeRecord",
+ "ClassRangeCount",
+ 0,
+ "Array of ClassRangeRecords-ordered by Start GlyphID",
+ ),
+ ],
+ ),
+ (
+ "ClassRangeRecord",
+ [
+ ("GlyphID", "Start", None, None, "First GlyphID in the range"),
+ ("GlyphID", "End", None, None, "Last GlyphID in the range"),
+ ("uint16", "Class", None, None, "Applied to all glyphs in the range"),
+ ],
+ ),
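
The two ClassDef formats answer the same question, "which class does this glyph belong to?", with glyphs not mentioned in either format taking class 0 per the OpenType spec. A small sketch over raw numeric glyph IDs:

    def glyph_class(class_def, glyph_id):
        if class_def.ClassFormat == 1:
            # Classes for a contiguous run of glyphs starting at StartGlyph.
            index = glyph_id - class_def.StartGlyph
            if 0 <= index < class_def.GlyphCount:
                return class_def.ClassValueArray[index]
        else:  # ClassFormat == 2
            # Ranges ordered by Start glyph ID, each mapped to one class.
            for record in class_def.ClassRangeRecord:
                if record.Start <= glyph_id <= record.End:
                    return record.Class
        return 0  # unlisted glyphs default to class 0
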
+ (
+ "Device",
+ [
+ ("uint16", "StartSize", None, None, "Smallest size to correct-in ppem"),
+ ("uint16", "EndSize", None, None, "Largest size to correct-in ppem"),
+ (
+ "uint16",
+ "DeltaFormat",
+ None,
+ None,
+ "Format of DeltaValue array data: 1, 2, or 3",
+ ),
+ (
+ "DeltaValue",
+ "DeltaValue",
+ "",
+ "DeltaFormat in (1,2,3)",
+ "Array of compressed data",
+ ),
+ ],
+ ),
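
The Device table's DeltaValue array is bit-packed: per the OpenType spec (not stated in the tuple above), DeltaFormat 1, 2 and 3 store signed 2-, 4- and 8-bit deltas, one per ppem size from StartSize through EndSize, packed high-to-low into uint16 words. A decoding sketch of that packing:

    def unpack_device_deltas(delta_format, start_size, end_size, words):
        # words is the raw DeltaValue array of uint16 values.
        bits = {1: 2, 2: 4, 3: 8}[delta_format]
        per_word = 16 // bits
        deltas = []
        for i in range(end_size - start_size + 1):
            word = words[i // per_word]
            shift = 16 - bits * (i % per_word + 1)
            value = (word >> shift) & ((1 << bits) - 1)
            if value >= 1 << (bits - 1):  # sign-extend
                value -= 1 << bits
            deltas.append(value)
        return deltas

For example, unpack_device_deltas(2, 12, 14, [0x12F0]) yields [1, 2, -1] for ppem sizes 12 through 14.
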
+ #
+ # gpos
+ #
+ (
+ "GPOS",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the GPOS table- 0x00010000 or 0x00010001",
+ ),
+ (
+ "Offset",
+ "ScriptList",
+ None,
+ None,
+ "Offset to ScriptList table-from beginning of GPOS table",
+ ),
+ (
+ "Offset",
+ "FeatureList",
+ None,
+ None,
+ "Offset to FeatureList table-from beginning of GPOS table",
+ ),
+ (
+ "Offset",
+ "LookupList",
+ None,
+ None,
+ "Offset to LookupList table-from beginning of GPOS table",
+ ),
+ (
+ "LOffset",
+ "FeatureVariations",
+ None,
+ "Version >= 0x00010001",
+ "Offset to FeatureVariations table-from beginning of GPOS table",
+ ),
+ ],
+ ),
+ (
+ "SinglePosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of SinglePos subtable",
+ ),
+ (
+ "uint16",
+ "ValueFormat",
+ None,
+ None,
+ "Defines the types of data in the ValueRecord",
+ ),
+ (
+ "ValueRecord",
+ "Value",
+ None,
+ None,
+ "Defines positioning value(s)-applied to all glyphs in the Coverage table",
+ ),
+ ],
+ ),
+ (
+ "SinglePosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of SinglePos subtable",
+ ),
+ (
+ "uint16",
+ "ValueFormat",
+ None,
+ None,
+ "Defines the types of data in the ValueRecord",
+ ),
+ ("uint16", "ValueCount", None, None, "Number of ValueRecords"),
+ (
+ "ValueRecord",
+ "Value",
+ "ValueCount",
+ 0,
+ "Array of ValueRecords-positioning values applied to glyphs",
+ ),
+ ],
+ ),
+ (
+ "PairPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair",
+ ),
+ (
+ "uint16",
+ "ValueFormat1",
+ None,
+ None,
+ "Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)",
+ ),
+ (
+ "uint16",
+ "ValueFormat2",
+ None,
+ None,
+ "Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)",
+ ),
+ ("uint16", "PairSetCount", None, None, "Number of PairSet tables"),
+ (
+ "Offset",
+ "PairSet",
+ "PairSetCount",
+ 0,
+ "Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "PairSet",
+ [
+ ("uint16", "PairValueCount", None, None, "Number of PairValueRecords"),
+ (
+ "struct",
+ "PairValueRecord",
+ "PairValueCount",
+ 0,
+ "Array of PairValueRecords-ordered by GlyphID of the second glyph",
+ ),
+ ],
+ ),
+ (
+ "PairValueRecord",
+ [
+ (
+ "GlyphID",
+ "SecondGlyph",
+ None,
+ None,
+ "GlyphID of second glyph in the pair-first glyph is listed in the Coverage table",
+ ),
+ (
+ "ValueRecord",
+ "Value1",
+ None,
+ None,
+ "Positioning data for the first glyph in the pair",
+ ),
+ (
+ "ValueRecord",
+ "Value2",
+ None,
+ None,
+ "Positioning data for the second glyph in the pair",
+ ),
+ ],
+ ),
+ (
+ "PairPosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair",
+ ),
+ (
+ "uint16",
+ "ValueFormat1",
+ None,
+ None,
+ "ValueRecord definition-for the first glyph of the pair-may be zero (0)",
+ ),
+ (
+ "uint16",
+ "ValueFormat2",
+ None,
+ None,
+ "ValueRecord definition-for the second glyph of the pair-may be zero (0)",
+ ),
+ (
+ "Offset",
+ "ClassDef1",
+ None,
+ None,
+ "Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair",
+ ),
+ (
+ "Offset",
+ "ClassDef2",
+ None,
+ None,
+ "Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair",
+ ),
+ (
+ "uint16",
+ "Class1Count",
+ None,
+ None,
+ "Number of classes in ClassDef1 table-includes Class0",
+ ),
+ (
+ "uint16",
+ "Class2Count",
+ None,
+ None,
+ "Number of classes in ClassDef2 table-includes Class0",
+ ),
+ (
+ "struct",
+ "Class1Record",
+ "Class1Count",
+ 0,
+ "Array of Class1 records-ordered by Class1",
+ ),
+ ],
+ ),
+ (
+ "Class1Record",
+ [
+ (
+ "struct",
+ "Class2Record",
+ "Class2Count",
+ 0,
+ "Array of Class2 records-ordered by Class2",
+ ),
+ ],
+ ),
+ (
+ "Class2Record",
+ [
+ (
+ "ValueRecord",
+ "Value1",
+ None,
+ None,
+ "Positioning for first glyph-empty if ValueFormat1 = 0",
+ ),
+ (
+ "ValueRecord",
+ "Value2",
+ None,
+ None,
+ "Positioning for second glyph-empty if ValueFormat2 = 0",
+ ),
+ ],
+ ),
+ (
+ "CursivePosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of CursivePos subtable",
+ ),
+ ("uint16", "EntryExitCount", None, None, "Number of EntryExit records"),
+ (
+ "struct",
+ "EntryExitRecord",
+ "EntryExitCount",
+ 0,
+ "Array of EntryExit records-in Coverage Index order",
+ ),
+ ],
+ ),
+ (
+ "EntryExitRecord",
+ [
+ (
+ "Offset",
+ "EntryAnchor",
+ None,
+ None,
+ "Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExitAnchor",
+ None,
+ None,
+ "Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL",
+ ),
+ ],
+ ),
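+ # Cursive attachment (GPOS lookup type 3): each covered glyph gets an
+ # EntryExitRecord with an entry and an exit Anchor; roughly, the shaper
+ # shifts the following glyph so that its EntryAnchor coincides with the
+ # previous glyph's ExitAnchor, which is how connected scripts join.  Either
+ # anchor offset may be NULL when a glyph only enters or only exits.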
+ (
+ "MarkBasePosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "MarkCoverage",
+ None,
+ None,
+ "Offset to MarkCoverage table-from beginning of MarkBasePos subtable",
+ ),
+ (
+ "Offset",
+ "BaseCoverage",
+ None,
+ None,
+ "Offset to BaseCoverage table-from beginning of MarkBasePos subtable",
+ ),
+ ("uint16", "ClassCount", None, None, "Number of classes defined for marks"),
+ (
+ "Offset",
+ "MarkArray",
+ None,
+ None,
+ "Offset to MarkArray table-from beginning of MarkBasePos subtable",
+ ),
+ (
+ "Offset",
+ "BaseArray",
+ None,
+ None,
+ "Offset to BaseArray table-from beginning of MarkBasePos subtable",
+ ),
+ ],
+ ),
+ (
+ "BaseArray",
+ [
+ ("uint16", "BaseCount", None, None, "Number of BaseRecords"),
+ (
+ "struct",
+ "BaseRecord",
+ "BaseCount",
+ 0,
+ "Array of BaseRecords-in order of BaseCoverage Index",
+ ),
+ ],
+ ),
+ (
+ "BaseRecord",
+ [
+ (
+ "Offset",
+ "BaseAnchor",
+ "ClassCount",
+ 0,
+ "Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based",
+ ),
+ ],
+ ),
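+ # Mark-to-base attachment (GPOS lookup type 4), sketched in terms of the
+ # names defined above: each mark in MarkCoverage has a MarkRecord giving its
+ # class and its own anchor, and each base has one anchor per mark class.
+ # A mark of class c is positioned, roughly, so that
+ #
+ #     MarkArray.MarkRecord[m].MarkAnchor  coincides with
+ #     BaseArray.BaseRecord[b].BaseAnchor[c]
+ #
+ # (the actual shaping math also accounts for advances and direction).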
+ (
+ "MarkLigPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "MarkCoverage",
+ None,
+ None,
+ "Offset to Mark Coverage table-from beginning of MarkLigPos subtable",
+ ),
+ (
+ "Offset",
+ "LigatureCoverage",
+ None,
+ None,
+ "Offset to Ligature Coverage table-from beginning of MarkLigPos subtable",
+ ),
+ ("uint16", "ClassCount", None, None, "Number of defined mark classes"),
+ (
+ "Offset",
+ "MarkArray",
+ None,
+ None,
+ "Offset to MarkArray table-from beginning of MarkLigPos subtable",
+ ),
+ (
+ "Offset",
+ "LigatureArray",
+ None,
+ None,
+ "Offset to LigatureArray table-from beginning of MarkLigPos subtable",
+ ),
+ ],
+ ),
+ (
+ "LigatureArray",
+ [
+ (
+ "uint16",
+ "LigatureCount",
+ None,
+ None,
+ "Number of LigatureAttach table offsets",
+ ),
+ (
+ "Offset",
+ "LigatureAttach",
+ "LigatureCount",
+ 0,
+ "Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index",
+ ),
+ ],
+ ),
+ (
+ "LigatureAttach",
+ [
+ (
+ "uint16",
+ "ComponentCount",
+ None,
+ None,
+ "Number of ComponentRecords in this ligature",
+ ),
+ (
+ "struct",
+ "ComponentRecord",
+ "ComponentCount",
+ 0,
+ "Array of Component records-ordered in writing direction",
+ ),
+ ],
+ ),
+ (
+ "ComponentRecord",
+ [
+ (
+ "Offset",
+ "LigatureAnchor",
+ "ClassCount",
+ 0,
+ "Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array",
+ ),
+ ],
+ ),
+ (
+ "MarkMarkPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Mark1Coverage",
+ None,
+ None,
+ "Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable",
+ ),
+ (
+ "Offset",
+ "Mark2Coverage",
+ None,
+ None,
+ "Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable",
+ ),
+ (
+ "uint16",
+ "ClassCount",
+ None,
+ None,
+ "Number of Combining Mark classes defined",
+ ),
+ (
+ "Offset",
+ "Mark1Array",
+ None,
+ None,
+ "Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable",
+ ),
+ (
+ "Offset",
+ "Mark2Array",
+ None,
+ None,
+ "Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable",
+ ),
+ ],
+ ),
+ (
+ "Mark2Array",
+ [
+ ("uint16", "Mark2Count", None, None, "Number of Mark2 records"),
+ (
+ "struct",
+ "Mark2Record",
+ "Mark2Count",
+ 0,
+ "Array of Mark2 records-in Coverage order",
+ ),
+ ],
+ ),
+ (
+ "Mark2Record",
+ [
+ (
+ "Offset",
+ "Mark2Anchor",
+ "ClassCount",
+ 0,
+ "Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array",
+ ),
+ ],
+ ),
+ (
+ "PosLookupRecord",
+ [
+ (
+ "uint16",
+ "SequenceIndex",
+ None,
+ None,
+ "Index to input glyph sequence-first glyph = 0",
+ ),
+ (
+ "uint16",
+ "LookupListIndex",
+ None,
+ None,
+ "Lookup to apply to that position-zero-based",
+ ),
+ ],
+ ),
+ (
+ "ContextPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ContextPos subtable",
+ ),
+ ("uint16", "PosRuleSetCount", None, None, "Number of PosRuleSet tables"),
+ (
+ "Offset",
+ "PosRuleSet",
+ "PosRuleSetCount",
+ 0,
+ "Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "PosRuleSet",
+ [
+ ("uint16", "PosRuleCount", None, None, "Number of PosRule tables"),
+ (
+ "Offset",
+ "PosRule",
+ "PosRuleCount",
+ 0,
+ "Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "PosRule",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs in the Input glyph sequence",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "GlyphID",
+ "Input",
+ "GlyphCount",
+ -1,
+ "Array of input GlyphIDs-starting with the second glyph",
+ ),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of positioning lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextPosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ContextPos subtable",
+ ),
+ (
+ "Offset",
+ "ClassDef",
+ None,
+ None,
+ "Offset to ClassDef table-from beginning of ContextPos subtable",
+ ),
+ ("uint16", "PosClassSetCount", None, None, "Number of PosClassSet tables"),
+ (
+ "Offset",
+ "PosClassSet",
+ "PosClassSetCount",
+ 0,
+ "Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "PosClassSet",
+ [
+ (
+ "uint16",
+ "PosClassRuleCount",
+ None,
+ None,
+ "Number of PosClassRule tables",
+ ),
+ (
+ "Offset",
+ "PosClassRule",
+ "PosClassRuleCount",
+ 0,
+ "Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "PosClassRule",
+ [
+ ("uint16", "GlyphCount", None, None, "Number of glyphs to be matched"),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "uint16",
+ "Class",
+ "GlyphCount",
+ -1,
+ "Array of classes-beginning with the second class-to be matched to the input glyph sequence",
+ ),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of positioning lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextPosFormat3",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs in the input sequence",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "Offset",
+ "Coverage",
+ "GlyphCount",
+ 0,
+ "Array of offsets to Coverage tables-from beginning of ContextPos subtable",
+ ),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of positioning lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ChainContextPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ContextPos subtable",
+ ),
+ (
+ "uint16",
+ "ChainPosRuleSetCount",
+ None,
+ None,
+ "Number of ChainPosRuleSet tables",
+ ),
+ (
+ "Offset",
+ "ChainPosRuleSet",
+ "ChainPosRuleSetCount",
+ 0,
+ "Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "ChainPosRuleSet",
+ [
+ (
+ "uint16",
+ "ChainPosRuleCount",
+ None,
+ None,
+ "Number of ChainPosRule tables",
+ ),
+ (
+ "Offset",
+ "ChainPosRule",
+ "ChainPosRuleCount",
+ 0,
+ "Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainPosRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking GlyphID's (to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the input sequence (includes the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input GlyphIDs (start with second glyph)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)",
+ ),
+ (
+ "GlyphID",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead GlyphID's (to be matched after the input sequence)",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of PosLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextPosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "Offset",
+ "BacktrackClassDef",
+ None,
+ None,
+ "Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "Offset",
+ "InputClassDef",
+ None,
+ None,
+ "Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "Offset",
+ "LookAheadClassDef",
+ None,
+ None,
+ "Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "uint16",
+ "ChainPosClassSetCount",
+ None,
+ None,
+ "Number of ChainPosClassSet tables",
+ ),
+ (
+ "Offset",
+ "ChainPosClassSet",
+ "ChainPosClassSetCount",
+ 0,
+ "Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "ChainPosClassSet",
+ [
+ (
+ "uint16",
+ "ChainPosClassRuleCount",
+ None,
+ None,
+ "Number of ChainPosClassRule tables",
+ ),
+ (
+ "Offset",
+ "ChainPosClassRule",
+ "ChainPosClassRuleCount",
+ 0,
+ "Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainPosClassRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "uint16",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking classes(to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of classes in the input sequence (includes the first class)",
+ ),
+ (
+ "uint16",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input classes(start with second class; to be matched with the input glyph sequence)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)",
+ ),
+ (
+ "uint16",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead classes(to be matched after the input sequence)",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of PosLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextPosFormat3",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Number of glyphs in the backtracking sequence",
+ ),
+ (
+ "Offset",
+ "BacktrackCoverage",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Number of glyphs in input sequence",
+ ),
+ (
+ "Offset",
+ "InputCoverage",
+ "InputGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in input sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Number of glyphs in lookahead sequence",
+ ),
+ (
+ "Offset",
+ "LookAheadCoverage",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of PosLookupRecords,in design order",
+ ),
+ ],
+ ),
+ (
+ "ExtensionPosFormat1",
+ [
+ ("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."),
+ (
+ "uint16",
+ "ExtensionLookupType",
+ None,
+ None,
+ "Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).",
+ ),
+ ("LOffset", "ExtSubTable", None, None, "Offset to SubTable"),
+ ],
+ ),
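+ # Extension positioning (GPOS lookup type 9) exists only to work around the
+ # 16-bit offset limit: ExtSubTable is a 32-bit offset to a subtable of the
+ # type named by ExtensionLookupType, letting large lookups live farther than
+ # 64k bytes from their parent.  fontTools generally wraps and unwraps these
+ # while compiling and decompiling, so most code simply reaches the wrapped
+ # subtable through the ExtSubTable attribute.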
+ # ('ValueRecord', [
+ # ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'),
+ # ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'),
+ # ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'),
+ # ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'),
+ # ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'),
+ # ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'),
+ # ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'),
+ # ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'),
+ # ]),
+ (
+ "AnchorFormat1",
+ [
+ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 1"),
+ ("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
+ ("int16", "YCoordinate", None, None, "Vertical value-in design units"),
+ ],
+ ),
+ (
+ "AnchorFormat2",
+ [
+ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 2"),
+ ("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
+ ("int16", "YCoordinate", None, None, "Vertical value-in design units"),
+ ("uint16", "AnchorPoint", None, None, "Index to glyph contour point"),
+ ],
+ ),
+ (
+ "AnchorFormat3",
+ [
+ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 3"),
+ ("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
+ ("int16", "YCoordinate", None, None, "Vertical value-in design units"),
+ (
+ "Offset",
+ "XDeviceTable",
+ None,
+ None,
+ "Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)",
+ ),
+ (
+ "Offset",
+ "YDeviceTable",
+ None,
+ None,
+ "Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)",
+ ),
+ ],
+ ),
+ (
+ "MarkArray",
+ [
+ ("uint16", "MarkCount", None, None, "Number of MarkRecords"),
+ (
+ "struct",
+ "MarkRecord",
+ "MarkCount",
+ 0,
+ "Array of MarkRecords-in Coverage order",
+ ),
+ ],
+ ),
+ (
+ "MarkRecord",
+ [
+ ("uint16", "Class", None, None, "Class defined for this mark"),
+ (
+ "Offset",
+ "MarkAnchor",
+ None,
+ None,
+ "Offset to Anchor table-from beginning of MarkArray table",
+ ),
+ ],
+ ),
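+ # The three Anchor formats trade size for precision: format 1 is a plain
+ # (x, y) point in design units, format 2 additionally names a glyph contour
+ # point so hinted outlines can fine-tune the attachment, and format 3 adds
+ # Device (or variation) tables that adjust x and y per size or per instance.
+ # In terms of the fields defined above (attribute presence depends on the
+ # format actually used):
+ #
+ #     anchor.XCoordinate, anchor.YCoordinate     # all three formats
+ #     getattr(anchor, "AnchorPoint", None)       # format 2 only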
+ #
+ # gsub
+ #
+ (
+ "GSUB",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the GSUB table- 0x00010000 or 0x00010001",
+ ),
+ (
+ "Offset",
+ "ScriptList",
+ None,
+ None,
+ "Offset to ScriptList table-from beginning of GSUB table",
+ ),
+ (
+ "Offset",
+ "FeatureList",
+ None,
+ None,
+ "Offset to FeatureList table-from beginning of GSUB table",
+ ),
+ (
+ "Offset",
+ "LookupList",
+ None,
+ None,
+ "Offset to LookupList table-from beginning of GSUB table",
+ ),
+ (
+ "LOffset",
+ "FeatureVariations",
+ None,
+ "Version >= 0x00010001",
+ "Offset to FeatureVariations table-from beginning of GSUB table",
+ ),
+ ],
+ ),
+ (
+ "SingleSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "DeltaGlyphID",
+ None,
+ None,
+ "Add to original GlyphID modulo 65536 to get substitute GlyphID",
+ ),
+ ],
+ ),
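+ # SingleSubstFormat1 stores one constant delta instead of a full mapping:
+ # substitute = (original GlyphID + DeltaGlyphID) % 65536.  For example, a
+ # set of figures whose alternates sit a fixed number of glyph IDs later in
+ # the font needs only the Coverage table plus that single delta.  Note that
+ # fontTools' decompiled SingleSubst hides the on-disk format and exposes a
+ # plain .mapping dict either way.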
+ (
+ "SingleSubstFormat2",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Substitute array",
+ ),
+ (
+ "GlyphID",
+ "Substitute",
+ "GlyphCount",
+ 0,
+ "Array of substitute GlyphIDs-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "MultipleSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "SequenceCount",
+ None,
+ None,
+ "Number of Sequence table offsets in the Sequence array",
+ ),
+ (
+ "Offset",
+ "Sequence",
+ "SequenceCount",
+ 0,
+ "Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "Sequence",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Substitute array. This should always be greater than 0.",
+ ),
+ (
+ "GlyphID",
+ "Substitute",
+ "GlyphCount",
+ 0,
+ "String of GlyphIDs to substitute",
+ ),
+ ],
+ ),
+ (
+ "AlternateSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "AlternateSetCount",
+ None,
+ None,
+ "Number of AlternateSet tables",
+ ),
+ (
+ "Offset",
+ "AlternateSet",
+ "AlternateSetCount",
+ 0,
+ "Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "AlternateSet",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Alternate array",
+ ),
+ (
+ "GlyphID",
+ "Alternate",
+ "GlyphCount",
+ 0,
+ "Array of alternate GlyphIDs-in arbitrary order",
+ ),
+ ],
+ ),
+ (
+ "LigatureSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ ("uint16", "LigSetCount", None, None, "Number of LigatureSet tables"),
+ (
+ "Offset",
+ "LigatureSet",
+ "LigSetCount",
+ 0,
+ "Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "LigatureSet",
+ [
+ ("uint16", "LigatureCount", None, None, "Number of Ligature tables"),
+ (
+ "Offset",
+ "Ligature",
+ "LigatureCount",
+ 0,
+ "Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "Ligature",
+ [
+ ("GlyphID", "LigGlyph", None, None, "GlyphID of ligature to substitute"),
+ ("uint16", "CompCount", None, None, "Number of components in the ligature"),
+ (
+ "GlyphID",
+ "Component",
+ "CompCount",
+ -1,
+ "Array of component GlyphIDs-start with the second component-ordered in writing direction",
+ ),
+ ],
+ ),
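+ # Ligature substitution example: replacing "f" + "i" with "fi" puts "f" in
+ # the Coverage table, one LigatureSet for it, and a Ligature with
+ # LigGlyph = "fi", CompCount = 2 and Component = ["i"]; the first component
+ # is implied by the Coverage entry, hence the -1 count adjustment above.
+ # Decompiled fontTools objects expose this as a .ligatures dict keyed by the
+ # first glyph name.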
+ (
+ "SubstLookupRecord",
+ [
+ (
+ "uint16",
+ "SequenceIndex",
+ None,
+ None,
+ "Index into current glyph sequence-first glyph = 0",
+ ),
+ (
+ "uint16",
+ "LookupListIndex",
+ None,
+ None,
+ "Lookup to apply to that position-zero-based",
+ ),
+ ],
+ ),
+ (
+ "ContextSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "SubRuleSetCount",
+ None,
+ None,
+ "Number of SubRuleSet tables-must equal GlyphCount in Coverage table",
+ ),
+ (
+ "Offset",
+ "SubRuleSet",
+ "SubRuleSetCount",
+ 0,
+ "Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "SubRuleSet",
+ [
+ ("uint16", "SubRuleCount", None, None, "Number of SubRule tables"),
+ (
+ "Offset",
+ "SubRule",
+ "SubRuleCount",
+ 0,
+ "Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "SubRule",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Total number of glyphs in input glyph sequence-includes the first glyph",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "GlyphID",
+ "Input",
+ "GlyphCount",
+ -1,
+ "Array of input GlyphIDs-start with second glyph",
+ ),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextSubstFormat2",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "ClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table-from beginning of Substitution table",
+ ),
+ ("uint16", "SubClassSetCount", None, None, "Number of SubClassSet tables"),
+ (
+ "Offset",
+ "SubClassSet",
+ "SubClassSetCount",
+ 0,
+ "Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "SubClassSet",
+ [
+ (
+ "uint16",
+ "SubClassRuleCount",
+ None,
+ None,
+ "Number of SubClassRule tables",
+ ),
+ (
+ "Offset",
+ "SubClassRule",
+ "SubClassRuleCount",
+ 0,
+ "Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "SubClassRule",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Total number of classes specified for the context in the rule-includes the first class",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "uint16",
+ "Class",
+ "GlyphCount",
+ -1,
+ "Array of classes-beginning with the second class-to be matched to the input glyph class sequence",
+ ),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of Substitution lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextSubstFormat3",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs in the input glyph sequence",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "Offset",
+ "Coverage",
+ "GlyphCount",
+ 0,
+ "Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order",
+ ),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords-in design order",
+ ),
+ ],
+ ),
+ (
+ "ChainContextSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "ChainSubRuleSetCount",
+ None,
+ None,
+ "Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table",
+ ),
+ (
+ "Offset",
+ "ChainSubRuleSet",
+ "ChainSubRuleSetCount",
+ 0,
+ "Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "ChainSubRuleSet",
+ [
+ (
+ "uint16",
+ "ChainSubRuleCount",
+ None,
+ None,
+ "Number of ChainSubRule tables",
+ ),
+ (
+ "Offset",
+ "ChainSubRule",
+ "ChainSubRuleCount",
+ 0,
+ "Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainSubRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking GlyphID's (to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the input sequence (includes the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input GlyphIDs (start with second glyph)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)",
+ ),
+ (
+ "GlyphID",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead GlyphID's (to be matched after the input sequence)",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextSubstFormat2",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "BacktrackClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "InputClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "LookAheadClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "ChainSubClassSetCount",
+ None,
+ None,
+ "Number of ChainSubClassSet tables",
+ ),
+ (
+ "Offset",
+ "ChainSubClassSet",
+ "ChainSubClassSetCount",
+ 0,
+ "Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "ChainSubClassSet",
+ [
+ (
+ "uint16",
+ "ChainSubClassRuleCount",
+ None,
+ None,
+ "Number of ChainSubClassRule tables",
+ ),
+ (
+ "Offset",
+ "ChainSubClassRule",
+ "ChainSubClassRuleCount",
+ 0,
+ "Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainSubClassRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "uint16",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking classes(to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of classes in the input sequence (includes the first class)",
+ ),
+ (
+ "uint16",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input classes(start with second class; to be matched with the input glyph sequence)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)",
+ ),
+ (
+ "uint16",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead classes(to be matched after the input sequence)",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextSubstFormat3",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Number of glyphs in the backtracking sequence",
+ ),
+ (
+ "Offset",
+ "BacktrackCoverage",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Number of glyphs in input sequence",
+ ),
+ (
+ "Offset",
+ "InputCoverage",
+ "InputGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in input sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Number of glyphs in lookahead sequence",
+ ),
+ (
+ "Offset",
+ "LookAheadCoverage",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords, in design order",
+ ),
+ ],
+ ),
+ (
+ "ExtensionSubstFormat1",
+ [
+ ("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."),
+ (
+ "uint16",
+ "ExtensionLookupType",
+ None,
+ None,
+ "Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).",
+ ),
+ (
+ "LOffset",
+ "ExtSubTable",
+ None,
+ None,
+ "Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)",
+ ),
+ ],
+ ),
+ (
+ "ReverseChainSingleSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ 0,
+ "Offset to Coverage table - from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Number of glyphs in the backtracking sequence",
+ ),
+ (
+ "Offset",
+ "BacktrackCoverage",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Number of glyphs in lookahead sequence",
+ ),
+ (
+ "Offset",
+ "LookAheadCoverage",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Substitute array",
+ ),
+ (
+ "GlyphID",
+ "Substitute",
+ "GlyphCount",
+ 0,
+ "Array of substitute GlyphIDs-ordered by Coverage index",
+ ),
+ ],
+ ),
+ #
+ # gdef
+ #
+ (
+ "GDEF",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003",
+ ),
+ (
+ "Offset",
+ "GlyphClassDef",
+ None,
+ None,
+ "Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "AttachList",
+ None,
+ None,
+ "Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "LigCaretList",
+ None,
+ None,
+ "Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "MarkAttachClassDef",
+ None,
+ None,
+ "Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "MarkGlyphSetsDef",
+ None,
+ "Version >= 0x00010002",
+ "Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "LOffset",
+ "VarStore",
+ None,
+ "Version >= 0x00010003",
+ "Offset to variation store (may be NULL)",
+ ),
+ ],
+ ),
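+ # GlyphClassDef assigns each glyph one of the spec-defined classes:
+ # 1 = base, 2 = ligature, 3 = mark, 4 = component.  A small, hedged sketch
+ # of querying it when the table is present (the glyph name here is only an
+ # example; classDefs is the decompiled ClassDef mapping):
+ #
+ #     gdef = font["GDEF"].table
+ #     klass = gdef.GlyphClassDef.classDefs.get("acutecomb", 0)   # 3 => mark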
+ (
+ "AttachList",
+ [
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table - from beginning of AttachList table",
+ ),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs with attachment points",
+ ),
+ (
+ "Offset",
+ "AttachPoint",
+ "GlyphCount",
+ 0,
+ "Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order",
+ ),
+ ],
+ ),
+ (
+ "AttachPoint",
+ [
+ (
+ "uint16",
+ "PointCount",
+ None,
+ None,
+ "Number of attachment points on this glyph",
+ ),
+ (
+ "uint16",
+ "PointIndex",
+ "PointCount",
+ 0,
+ "Array of contour point indices -in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "LigCaretList",
+ [
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table - from beginning of LigCaretList table",
+ ),
+ ("uint16", "LigGlyphCount", None, None, "Number of ligature glyphs"),
+ (
+ "Offset",
+ "LigGlyph",
+ "LigGlyphCount",
+ 0,
+ "Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order",
+ ),
+ ],
+ ),
+ (
+ "LigGlyph",
+ [
+ (
+ "uint16",
+ "CaretCount",
+ None,
+ None,
+ "Number of CaretValues for this ligature (components - 1)",
+ ),
+ (
+ "Offset",
+ "CaretValue",
+ "CaretCount",
+ 0,
+ "Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order",
+ ),
+ ],
+ ),
+ (
+ "CaretValueFormat1",
+ [
+ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 1"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ ],
+ ),
+ (
+ "CaretValueFormat2",
+ [
+ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "CaretValuePoint", None, None, "Contour point index on glyph"),
+ ],
+ ),
+ (
+ "CaretValueFormat3",
+ [
+ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 3"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ (
+ "Offset",
+ "DeviceTable",
+ None,
+ None,
+ "Offset to Device table for X or Y value-from beginning of CaretValue table",
+ ),
+ ],
+ ),
+ (
+ "MarkGlyphSetsDef",
+ [
+ ("uint16", "MarkSetTableFormat", None, None, "Format identifier == 1"),
+ ("uint16", "MarkSetCount", None, None, "Number of mark sets defined"),
+ (
+ "LOffset",
+ "Coverage",
+ "MarkSetCount",
+ 0,
+ "Array of offsets to mark set coverage tables.",
+ ),
+ ],
+ ),
+ #
+ # base
+ #
+ (
+ "BASE",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the BASE table-initially 0x00010000",
+ ),
+ (
+ "Offset",
+ "HorizAxis",
+ None,
+ None,
+ "Offset to horizontal Axis table-from beginning of BASE table-may be NULL",
+ ),
+ (
+ "Offset",
+ "VertAxis",
+ None,
+ None,
+ "Offset to vertical Axis table-from beginning of BASE table-may be NULL",
+ ),
+ (
+ "LOffset",
+ "VarStore",
+ None,
+ "Version >= 0x00010001",
+ "Offset to variation store (may be NULL)",
+ ),
+ ],
+ ),
+ (
+ "Axis",
+ [
+ (
+ "Offset",
+ "BaseTagList",
+ None,
+ None,
+ "Offset to BaseTagList table-from beginning of Axis table-may be NULL",
+ ),
+ (
+ "Offset",
+ "BaseScriptList",
+ None,
+ None,
+ "Offset to BaseScriptList table-from beginning of Axis table",
+ ),
+ ],
+ ),
+ (
+ "BaseTagList",
+ [
+ (
+ "uint16",
+ "BaseTagCount",
+ None,
+ None,
+ "Number of baseline identification tags in this text direction-may be zero (0)",
+ ),
+ (
+ "Tag",
+ "BaselineTag",
+ "BaseTagCount",
+ 0,
+ "Array of 4-byte baseline identification tags-must be in alphabetical order",
+ ),
+ ],
+ ),
+ (
+ "BaseScriptList",
+ [
+ (
+ "uint16",
+ "BaseScriptCount",
+ None,
+ None,
+ "Number of BaseScriptRecords defined",
+ ),
+ (
+ "struct",
+ "BaseScriptRecord",
+ "BaseScriptCount",
+ 0,
+ "Array of BaseScriptRecords-in alphabetical order by BaseScriptTag",
+ ),
+ ],
+ ),
+ (
+ "BaseScriptRecord",
+ [
+ ("Tag", "BaseScriptTag", None, None, "4-byte script identification tag"),
+ (
+ "Offset",
+ "BaseScript",
+ None,
+ None,
+ "Offset to BaseScript table-from beginning of BaseScriptList",
+ ),
+ ],
+ ),
+ (
+ "BaseScript",
+ [
+ (
+ "Offset",
+ "BaseValues",
+ None,
+ None,
+ "Offset to BaseValues table-from beginning of BaseScript table-may be NULL",
+ ),
+ (
+ "Offset",
+ "DefaultMinMax",
+ None,
+ None,
+ "Offset to MinMax table- from beginning of BaseScript table-may be NULL",
+ ),
+ (
+ "uint16",
+ "BaseLangSysCount",
+ None,
+ None,
+ "Number of BaseLangSysRecords defined-may be zero (0)",
+ ),
+ (
+ "struct",
+ "BaseLangSysRecord",
+ "BaseLangSysCount",
+ 0,
+ "Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag",
+ ),
+ ],
+ ),
+ (
+ "BaseLangSysRecord",
+ [
+ (
+ "Tag",
+ "BaseLangSysTag",
+ None,
+ None,
+ "4-byte language system identification tag",
+ ),
+ (
+ "Offset",
+ "MinMax",
+ None,
+ None,
+ "Offset to MinMax table-from beginning of BaseScript table",
+ ),
+ ],
+ ),
+ (
+ "BaseValues",
+ [
+ (
+ "uint16",
+ "DefaultIndex",
+ None,
+ None,
+ "Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList",
+ ),
+ (
+ "uint16",
+ "BaseCoordCount",
+ None,
+ None,
+ "Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList",
+ ),
+ (
+ "Offset",
+ "BaseCoord",
+ "BaseCoordCount",
+ 0,
+ "Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList",
+ ),
+ ],
+ ),
+ (
+ "MinMax",
+ [
+ (
+ "Offset",
+ "MinCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL",
+ ),
+ (
+ "Offset",
+ "MaxCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL",
+ ),
+ (
+ "uint16",
+ "FeatMinMaxCount",
+ None,
+ None,
+ "Number of FeatMinMaxRecords-may be zero (0)",
+ ),
+ (
+ "struct",
+ "FeatMinMaxRecord",
+ "FeatMinMaxCount",
+ 0,
+ "Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag",
+ ),
+ ],
+ ),
+ (
+ "FeatMinMaxRecord",
+ [
+ (
+ "Tag",
+ "FeatureTableTag",
+ None,
+ None,
+ "4-byte feature identification tag-must match FeatureTag in FeatureList",
+ ),
+ (
+ "Offset",
+ "MinCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL",
+ ),
+ (
+ "Offset",
+ "MaxCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "BaseCoordFormat1",
+ [
+ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 1"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ ],
+ ),
+ (
+ "BaseCoordFormat2",
+ [
+ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 2"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ ("GlyphID", "ReferenceGlyph", None, None, "GlyphID of control glyph"),
+ (
+ "uint16",
+ "BaseCoordPoint",
+ None,
+ None,
+ "Index of contour point on the ReferenceGlyph",
+ ),
+ ],
+ ),
+ (
+ "BaseCoordFormat3",
+ [
+ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 3"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ (
+ "Offset",
+ "DeviceTable",
+ None,
+ None,
+ "Offset to Device table for X or Y value",
+ ),
+ ],
+ ),
+ #
+ # jstf
+ #
+ (
+ "JSTF",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the JSTF table-initially set to 0x00010000",
+ ),
+ (
+ "uint16",
+ "JstfScriptCount",
+ None,
+ None,
+ "Number of JstfScriptRecords in this table",
+ ),
+ (
+ "struct",
+ "JstfScriptRecord",
+ "JstfScriptCount",
+ 0,
+ "Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag",
+ ),
+ ],
+ ),
+ (
+ "JstfScriptRecord",
+ [
+ ("Tag", "JstfScriptTag", None, None, "4-byte JstfScript identification"),
+ (
+ "Offset",
+ "JstfScript",
+ None,
+ None,
+ "Offset to JstfScript table-from beginning of JSTF Header",
+ ),
+ ],
+ ),
+ (
+ "JstfScript",
+ [
+ (
+ "Offset",
+ "ExtenderGlyph",
+ None,
+ None,
+ "Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL",
+ ),
+ (
+ "Offset",
+ "DefJstfLangSys",
+ None,
+ None,
+ "Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL",
+ ),
+ (
+ "uint16",
+ "JstfLangSysCount",
+ None,
+ None,
+ "Number of JstfLangSysRecords in this table- may be zero (0)",
+ ),
+ (
+ "struct",
+ "JstfLangSysRecord",
+ "JstfLangSysCount",
+ 0,
+ "Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag",
+ ),
+ ],
+ ),
+ (
+ "JstfLangSysRecord",
+ [
+ ("Tag", "JstfLangSysTag", None, None, "4-byte JstfLangSys identifier"),
+ (
+ "Offset",
+ "JstfLangSys",
+ None,
+ None,
+ "Offset to JstfLangSys table-from beginning of JstfScript table",
+ ),
+ ],
+ ),
+ (
+ "ExtenderGlyph",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of Extender Glyphs in this script",
+ ),
+ (
+ "GlyphID",
+ "ExtenderGlyph",
+ "GlyphCount",
+ 0,
+ "GlyphIDs-in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "JstfLangSys",
+ [
+ (
+ "uint16",
+ "JstfPriorityCount",
+ None,
+ None,
+ "Number of JstfPriority tables",
+ ),
+ (
+ "Offset",
+ "JstfPriority",
+ "JstfPriorityCount",
+ 0,
+ "Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order",
+ ),
+ ],
+ ),
+ (
+ "JstfPriority",
+ [
+ (
+ "Offset",
+ "ShrinkageEnableGSUB",
+ None,
+ None,
+ "Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageDisableGSUB",
+ None,
+ None,
+ "Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageEnableGPOS",
+ None,
+ None,
+ "Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageDisableGPOS",
+ None,
+ None,
+ "Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageJstfMax",
+ None,
+ None,
+ "Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionEnableGSUB",
+ None,
+ None,
+ "Offset to Extension Enable JstfGSUBModList table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionDisableGSUB",
+ None,
+ None,
+ "Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionEnableGPOS",
+ None,
+ None,
+ "Offset to Extension Enable JstfGSUBModList table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionDisableGPOS",
+ None,
+ None,
+ "Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionJstfMax",
+ None,
+ None,
+ "Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL",
+ ),
+ ],
+ ),
+ (
+ "JstfGSUBModList",
+ [
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of lookups for this modification",
+ ),
+ (
+ "uint16",
+ "GSUBLookupIndex",
+ "LookupCount",
+ 0,
+ "Array of LookupIndex identifiers in GSUB-in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "JstfGPOSModList",
+ [
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of lookups for this modification",
+ ),
+ (
+ "uint16",
+ "GPOSLookupIndex",
+ "LookupCount",
+ 0,
+ "Array of LookupIndex identifiers in GPOS-in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "JstfMax",
+ [
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of lookup Indices for this modification",
+ ),
+ (
+ "Offset",
+ "Lookup",
+ "LookupCount",
+ 0,
+ "Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order",
+ ),
+ ],
+ ),
+ #
+ # STAT
+ #
+ (
+ "STAT",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the table-initially set to 0x00010000, currently 0x00010002.",
+ ),
+ (
+ "uint16",
+ "DesignAxisRecordSize",
+ None,
+ None,
+ "Size in bytes of each design axis record",
+ ),
+ ("uint16", "DesignAxisCount", None, None, "Number of design axis records"),
+ (
+ "LOffsetTo(AxisRecordArray)",
+ "DesignAxisRecord",
+ None,
+ None,
+ "Offset in bytes from the beginning of the STAT table to the start of the design axes array",
+ ),
+ ("uint16", "AxisValueCount", None, None, "Number of axis value tables"),
+ (
+ "LOffsetTo(AxisValueArray)",
+ "AxisValueArray",
+ None,
+ None,
+ "Offset in bytes from the beginning of the STAT table to the start of the axes value offset array",
+ ),
+ (
+ "NameID",
+ "ElidedFallbackNameID",
+ None,
+ "Version >= 0x00010001",
+ "NameID to use when all style attributes are elided.",
+ ),
+ ],
+ ),
+ (
+ "AxisRecordArray",
+ [
+ ("AxisRecord", "Axis", "DesignAxisCount", 0, "Axis records"),
+ ],
+ ),
+ (
+ "AxisRecord",
+ [
+ (
+ "Tag",
+ "AxisTag",
+ None,
+ None,
+ "A tag identifying the axis of design variation",
+ ),
+ (
+ "NameID",
+ "AxisNameID",
+ None,
+ None,
+ 'The name ID for entries in the "name" table that provide a display string for this axis',
+ ),
+ (
+ "uint16",
+ "AxisOrdering",
+ None,
+ None,
+ "A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names",
+ ),
+ (
+ "uint8",
+ "MoreBytes",
+ "DesignAxisRecordSize",
+ -8,
+ "Extra bytes. Set to empty array.",
+ ),
+ ],
+ ),
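+ # The MoreBytes entry above mirrors how DesignAxisRecordSize works: the
+ # three defined fields (a 4-byte Tag plus two uint16s) occupy 8 bytes, so a
+ # record declared larger than that carries DesignAxisRecordSize - 8 extra
+ # bytes, which fontTools round-trips as opaque uint8 values.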
+ (
+ "AxisValueArray",
+ [
+ ("Offset", "AxisValue", "AxisValueCount", 0, "Axis values"),
+ ],
+ ),
+ (
+ "AxisValueFormat1",
+ [
+ ("uint16", "Format", None, None, "Format, = 1"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ ("Fixed", "Value", None, None, ""),
+ ],
+ ),
+ (
+ "AxisValueFormat2",
+ [
+ ("uint16", "Format", None, None, "Format, = 2"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ ("Fixed", "NominalValue", None, None, ""),
+ ("Fixed", "RangeMinValue", None, None, ""),
+ ("Fixed", "RangeMaxValue", None, None, ""),
+ ],
+ ),
+ (
+ "AxisValueFormat3",
+ [
+ ("uint16", "Format", None, None, "Format, = 3"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ ("Fixed", "Value", None, None, ""),
+ ("Fixed", "LinkedValue", None, None, ""),
+ ],
+ ),
+ (
+ "AxisValueFormat4",
+ [
+ ("uint16", "Format", None, None, "Format, = 4"),
+ (
+ "uint16",
+ "AxisCount",
+ None,
+ None,
+ "The total number of axes contributing to this axis-values combination.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ (
+ "struct",
+ "AxisValueRecord",
+ "AxisCount",
+ 0,
+ "Array of AxisValue records that provide the combination of axis values, one for each contributing axis. ",
+ ),
+ ],
+ ),
+ (
+ "AxisValueRecord",
+ [
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("Fixed", "Value", None, None, "A numeric value for this attribute value."),
+ ],
+ ),
+ #
+ # Variation fonts
+ #
+ # GSUB/GPOS FeatureVariations
+ (
+ "FeatureVariations",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the table-initially set to 0x00010000",
+ ),
+ (
+ "uint32",
+ "FeatureVariationCount",
+ None,
+ None,
+ "Number of records in the FeatureVariationRecord array",
+ ),
+ (
+ "struct",
+ "FeatureVariationRecord",
+ "FeatureVariationCount",
+ 0,
+ "Array of FeatureVariationRecord",
+ ),
+ ],
+ ),
+ (
+ "FeatureVariationRecord",
+ [
+ (
+ "LOffset",
+ "ConditionSet",
+ None,
+ None,
+ "Offset to a ConditionSet table, from beginning of the FeatureVariations table.",
+ ),
+ (
+ "LOffset",
+ "FeatureTableSubstitution",
+ None,
+ None,
+ "Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table",
+ ),
+ ],
+ ),
+ (
+ "ConditionSet",
+ [
+ (
+ "uint16",
+ "ConditionCount",
+ None,
+ None,
+ "Number of condition tables in the ConditionTable array",
+ ),
+ (
+ "LOffset",
+ "ConditionTable",
+ "ConditionCount",
+ 0,
+ "Array of condition tables.",
+ ),
+ ],
+ ),
+ (
+ "ConditionTableFormat1",
+ [
+ ("uint16", "Format", None, None, "Format, = 1"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index for the variation axis within the fvar table, base 0.",
+ ),
+ (
+ "F2Dot14",
+ "FilterRangeMinValue",
+ None,
+ None,
+ "Minimum normalized axis value of the font variation instances that satisfy this condition.",
+ ),
+ (
+ "F2Dot14",
+ "FilterRangeMaxValue",
+ None,
+ None,
+ "Maximum value that satisfies this condition.",
+ ),
+ ],
+ ),
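+ # A format-1 condition holds for a variation instance when its normalized
+ # coordinate on axis AxisIndex lies inside the closed range, i.e.
+ #
+ #     FilterRangeMinValue <= normalizedCoord <= FilterRangeMaxValue
+ #
+ # with coordinates in the usual normalized [-1.0, 1.0] space of the fvar
+ # axes.  A ConditionSet is satisfied only when all of its conditions hold,
+ # which is how feature variations (e.g. 'rvrn' alternates) are gated to a
+ # region of the design space.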
+ (
+ "FeatureTableSubstitution",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the table-initially set to 0x00010000",
+ ),
+ (
+ "uint16",
+ "SubstitutionCount",
+ None,
+ None,
+ "Number of records in the FeatureVariationRecords array",
+ ),
+ (
+ "FeatureTableSubstitutionRecord",
+ "SubstitutionRecord",
+ "SubstitutionCount",
+ 0,
+ "Array of FeatureTableSubstitutionRecord",
+ ),
+ ],
+ ),
+ (
+ "FeatureTableSubstitutionRecord",
+ [
+ ("uint16", "FeatureIndex", None, None, "The feature table index to match."),
+ (
+ "LOffset",
+ "Feature",
+ None,
+ None,
+ "Offset to an alternate feature table, from start of the FeatureTableSubstitution table.",
+ ),
+ ],
+ ),
+ # VariationStore
+ (
+ "VarRegionAxis",
+ [
+ ("F2Dot14", "StartCoord", None, None, ""),
+ ("F2Dot14", "PeakCoord", None, None, ""),
+ ("F2Dot14", "EndCoord", None, None, ""),
+ ],
+ ),
+ (
+ "VarRegion",
+ [
+ ("struct", "VarRegionAxis", "RegionAxisCount", 0, ""),
+ ],
+ ),
+ (
+ "VarRegionList",
+ [
+ ("uint16", "RegionAxisCount", None, None, ""),
+ ("uint16", "RegionCount", None, None, ""),
+ ("VarRegion", "Region", "RegionCount", 0, ""),
+ ],
+ ),
+ (
+ "VarData",
+ [
+ ("uint16", "ItemCount", None, None, ""),
+ ("uint16", "NumShorts", None, None, ""),
+ ("uint16", "VarRegionCount", None, None, ""),
+ ("uint16", "VarRegionIndex", "VarRegionCount", 0, ""),
+ ("VarDataValue", "Item", "ItemCount", 0, ""),
+ ],
+ ),
+ (
+ "VarStore",
+ [
+ ("uint16", "Format", None, None, "Set to 1."),
+ ("LOffset", "VarRegionList", None, None, ""),
+ ("uint16", "VarDataCount", None, None, ""),
+ ("LOffset", "VarData", "VarDataCount", 0, ""),
+ ],
+ ),
+ # Variation helpers
+ (
+ "VarIdxMap",
+ [
+ ("uint16", "EntryFormat", None, None, ""), # Automatically computed
+ ("uint16", "MappingCount", None, None, ""), # Automatically computed
+ ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
+ ],
+ ),
+ (
+ "DeltaSetIndexMapFormat0",
+ [
+ ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 0"),
+ ("uint8", "EntryFormat", None, None, ""), # Automatically computed
+ ("uint16", "MappingCount", None, None, ""), # Automatically computed
+ ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
+ ],
+ ),
+ (
+ "DeltaSetIndexMapFormat1",
+ [
+ ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 1"),
+ ("uint8", "EntryFormat", None, None, ""), # Automatically computed
+ ("uint32", "MappingCount", None, None, ""), # Automatically computed
+ ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
+ ],
+ ),
+ # Glyph advance variations
+ (
+ "HVAR",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the HVAR table-initially = 0x00010000",
+ ),
+ ("LOffset", "VarStore", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "AdvWidthMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "LsbMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "RsbMap", None, None, ""),
+ ],
+ ),
+ (
+ "VVAR",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the VVAR table-initially = 0x00010000",
+ ),
+ ("LOffset", "VarStore", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "AdvHeightMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "TsbMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "BsbMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "VOrgMap", None, None, "Vertical origin mapping."),
+ ],
+ ),
+ # Font-wide metrics variations
+ (
+ "MetricsValueRecord",
+ [
+ ("Tag", "ValueTag", None, None, "4-byte font-wide measure identifier"),
+ ("uint32", "VarIdx", None, None, "Combined outer-inner variation index"),
+ (
+ "uint8",
+ "MoreBytes",
+ "ValueRecordSize",
+ -8,
+ "Extra bytes. Set to empty array.",
+ ),
+ ],
+ ),
+ (
+ "MVAR",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the MVAR table-initially = 0x00010000",
+ ),
+ ("uint16", "Reserved", None, None, "Set to 0"),
+ ("uint16", "ValueRecordSize", None, None, ""),
+ ("uint16", "ValueRecordCount", None, None, ""),
+ ("Offset", "VarStore", None, None, ""),
+ ("MetricsValueRecord", "ValueRecord", "ValueRecordCount", 0, ""),
+ ],
+ ),
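+ # Each MetricsValueRecord ties a four-byte tag naming a font-wide metric
+ # (for example 'xhgt' for OS/2.sxHeight or 'unds' for
+ # post.underlineThickness) to a VarIdx into the item variation store, so
+ # variable-font instances can vary those metrics.  ValueRecordSize permits
+ # records larger than the 8 defined bytes, padded via MoreBytes just like
+ # STAT axis records.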
+ #
+ # math
+ #
+ (
+ "MATH",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the MATH table-initially set to 0x00010000.",
+ ),
+ (
+ "Offset",
+ "MathConstants",
+ None,
+ None,
+ "Offset to MathConstants table - from the beginning of MATH table.",
+ ),
+ (
+ "Offset",
+ "MathGlyphInfo",
+ None,
+ None,
+ "Offset to MathGlyphInfo table - from the beginning of MATH table.",
+ ),
+ (
+ "Offset",
+ "MathVariants",
+ None,
+ None,
+ "Offset to MathVariants table - from the beginning of MATH table.",
+ ),
+ ],
+ ),
+ (
+ "MathValueRecord",
+ [
+ ("int16", "Value", None, None, "The X or Y value in design units."),
+ (
+ "Offset",
+ "DeviceTable",
+ None,
+ None,
+ "Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.",
+ ),
+ ],
+ ),
+ (
+ "MathConstants",
+ [
+ (
+ "int16",
+ "ScriptPercentScaleDown",
+ None,
+ None,
+ "Percentage of scaling down for script level 1. Suggested value: 80%.",
+ ),
+ (
+ "int16",
+ "ScriptScriptPercentScaleDown",
+ None,
+ None,
+ "Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.",
+ ),
+ (
+ "uint16",
+ "DelimitedSubFormulaMinHeight",
+ None,
+ None,
+ "Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.",
+ ),
+ (
+ "uint16",
+ "DisplayOperatorMinHeight",
+ None,
+ None,
+ "Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.",
+ ),
+ (
+ "MathValueRecord",
+ "MathLeading",
+ None,
+ None,
+ "White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.",
+ ),
+ ("MathValueRecord", "AxisHeight", None, None, "Axis height of the font."),
+ (
+ "MathValueRecord",
+ "AccentBaseHeight",
+ None,
+ None,
+ "Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.",
+ ),
+ (
+ "MathValueRecord",
+ "FlattenedAccentBaseHeight",
+ None,
+ None,
+ "Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).",
+ ),
+ (
+ "MathValueRecord",
+ "SubscriptShiftDown",
+ None,
+ None,
+ "The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.",
+ ),
+ (
+ "MathValueRecord",
+ "SubscriptTopMax",
+ None,
+ None,
+ "Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "SubscriptBaselineDropMin",
+ None,
+ None,
+ "Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptShiftUp",
+ None,
+ None,
+ "Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptShiftUpCramped",
+ None,
+ None,
+ "Standard shift of superscripts relative to the base, in cramped style.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptBottomMin",
+ None,
+ None,
+ "Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptBaselineDropMax",
+ None,
+ None,
+ "Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.",
+ ),
+ (
+ "MathValueRecord",
+ "SubSuperscriptGapMin",
+ None,
+ None,
+ "Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptBottomMaxWithSubscript",
+ None,
+ None,
+ "The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "SpaceAfterScript",
+ None,
+ None,
+ "Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.",
+ ),
+ (
+ "MathValueRecord",
+ "UpperLimitGapMin",
+ None,
+ None,
+ "Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "UpperLimitBaselineRiseMin",
+ None,
+ None,
+ "Minimum distance between baseline of upper limit and (ink) top of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "LowerLimitGapMin",
+ None,
+ None,
+ "Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "LowerLimitBaselineDropMin",
+ None,
+ None,
+ "Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "StackTopShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the top element of a stack.",
+ ),
+ (
+ "MathValueRecord",
+ "StackTopDisplayStyleShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the top element of a stack in display style.",
+ ),
+ (
+ "MathValueRecord",
+ "StackBottomShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "StackBottomDisplayStyleShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "StackGapMin",
+ None,
+ None,
+ "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "StackDisplayStyleGapMin",
+ None,
+ None,
+ "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackTopShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the top element of the stretch stack.",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackBottomShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackGapAboveMin",
+ None,
+ None,
+ "Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackGapBelowMin",
+ None,
+ None,
+ "Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumeratorShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the numerator.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumeratorDisplayStyleShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenominatorShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the denominator. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenominatorDisplayStyleShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumeratorGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumDisplayStyleGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionRuleThickness",
+ None,
+ None,
+ "Thickness of the fraction bar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenominatorGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenomDisplayStyleGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "SkewedFractionHorizontalGap",
+ None,
+ None,
+ "Horizontal distance between the top and bottom elements of a skewed fraction.",
+ ),
+ (
+ "MathValueRecord",
+ "SkewedFractionVerticalGap",
+ None,
+ None,
+ "Vertical distance between the ink of the top and bottom elements of a skewed fraction.",
+ ),
+ (
+ "MathValueRecord",
+ "OverbarVerticalGap",
+ None,
+ None,
+ "Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "OverbarRuleThickness",
+ None,
+ None,
+ "Thickness of overbar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "OverbarExtraAscender",
+ None,
+ None,
+ "Extra white space reserved above the overbar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "UnderbarVerticalGap",
+ None,
+ None,
+ "Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "UnderbarRuleThickness",
+ None,
+ None,
+ "Thickness of underbar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "UnderbarExtraDescender",
+ None,
+ None,
+ "Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalVerticalGap",
+ None,
+ None,
+ "Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalDisplayStyleVerticalGap",
+ None,
+ None,
+ "Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalRuleThickness",
+ None,
+ None,
+ "Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalExtraAscender",
+ None,
+ None,
+ "Extra white space reserved above the radical. Suggested: RadicalRuleThickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalKernBeforeDegree",
+ None,
+ None,
+ "Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalKernAfterDegree",
+ None,
+ None,
+ "Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.",
+ ),
+ (
+ "uint16",
+ "RadicalDegreeBottomRaisePercent",
+ None,
+ None,
+ "Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.",
+ ),
+ ],
+ ),
+ (
+ "MathGlyphInfo",
+ [
+ (
+ "Offset",
+ "MathItalicsCorrectionInfo",
+ None,
+ None,
+ "Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.",
+ ),
+ (
+ "Offset",
+ "MathTopAccentAttachment",
+ None,
+ None,
+ "Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.",
+ ),
+ (
+ "Offset",
+ "ExtendedShapeCoverage",
+ None,
+ None,
+ "Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.",
+ ),
+ (
+ "Offset",
+ "MathKernInfo",
+ None,
+ None,
+ "Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.",
+ ),
+ ],
+ ),
+ (
+ "MathItalicsCorrectionInfo",
+ [
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.",
+ ),
+ (
+ "uint16",
+ "ItalicsCorrectionCount",
+ None,
+ None,
+ "Number of italics correction values. Should coincide with the number of covered glyphs.",
+ ),
+ (
+ "MathValueRecord",
+ "ItalicsCorrection",
+ "ItalicsCorrectionCount",
+ 0,
+ "Array of MathValueRecords defining italics correction values for each covered glyph.",
+ ),
+ ],
+ ),
+ (
+ "MathTopAccentAttachment",
+ [
+ (
+ "Offset",
+ "TopAccentCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathTopAccentAttachment table.",
+ ),
+ (
+ "uint16",
+ "TopAccentAttachmentCount",
+ None,
+ None,
+ "Number of top accent attachment point values. Should coincide with the number of covered glyphs",
+ ),
+ (
+ "MathValueRecord",
+ "TopAccentAttachment",
+ "TopAccentAttachmentCount",
+ 0,
+ "Array of MathValueRecords defining top accent attachment points for each covered glyph",
+ ),
+ ],
+ ),
+ (
+ "MathKernInfo",
+ [
+ (
+ "Offset",
+ "MathKernCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of the MathKernInfo table.",
+ ),
+ ("uint16", "MathKernCount", None, None, "Number of MathKernInfoRecords."),
+ (
+ "MathKernInfoRecord",
+ "MathKernInfoRecords",
+ "MathKernCount",
+ 0,
+ "Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.",
+ ),
+ ],
+ ),
+ (
+ "MathKernInfoRecord",
+ [
+ (
+ "Offset",
+ "TopRightMathKern",
+ None,
+ None,
+ "Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ (
+ "Offset",
+ "TopLeftMathKern",
+ None,
+ None,
+ "Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ (
+ "Offset",
+ "BottomRightMathKern",
+ None,
+ None,
+ "Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ (
+ "Offset",
+ "BottomLeftMathKern",
+ None,
+ None,
+ "Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ ],
+ ),
+ (
+ "MathKern",
+ [
+ (
+ "uint16",
+ "HeightCount",
+ None,
+ None,
+ "Number of heights on which the kern value changes.",
+ ),
+ (
+ "MathValueRecord",
+ "CorrectionHeight",
+ "HeightCount",
+ 0,
+ "Array of correction heights at which the kern value changes. Sorted by the height value in design units.",
+ ),
+ (
+ "MathValueRecord",
+ "KernValue",
+ "HeightCount",
+ 1,
+ "Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as move glyphs closer to each other.",
+ ),
+ ],
+ ),
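+ # Illustration only: a minimal sketch of the height-bucketed lookup that the
+ # MathKern fields above describe, assuming decompiled objects that expose
+ # CorrectionHeight/KernValue lists of MathValueRecords as declared here.
+ #
+ #     def mathKernAt(mathKern, correctionHeight):
+ #         # KernValue has HeightCount + 1 entries: entry i applies for heights
+ #         # up to CorrectionHeight[i]; the last entry covers all greater heights.
+ #         for i, rec in enumerate(mathKern.CorrectionHeight):
+ #             if correctionHeight <= rec.Value:
+ #                 return mathKern.KernValue[i].Value
+ #         return mathKern.KernValue[-1].Value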
+ (
+ "MathVariants",
+ [
+ (
+ "uint16",
+ "MinConnectorOverlap",
+ None,
+ None,
+ "Minimum overlap of connecting glyphs during glyph construction, in design units.",
+ ),
+ (
+ "Offset",
+ "VertGlyphCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathVariants table.",
+ ),
+ (
+ "Offset",
+ "HorizGlyphCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathVariants table.",
+ ),
+ (
+ "uint16",
+ "VertGlyphCount",
+ None,
+ None,
+ "Number of glyphs for which information is provided for vertically growing variants.",
+ ),
+ (
+ "uint16",
+ "HorizGlyphCount",
+ None,
+ None,
+ "Number of glyphs for which information is provided for horizontally growing variants.",
+ ),
+ (
+ "Offset",
+ "VertGlyphConstruction",
+ "VertGlyphCount",
+ 0,
+ "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.",
+ ),
+ (
+ "Offset",
+ "HorizGlyphConstruction",
+ "HorizGlyphCount",
+ 0,
+ "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.",
+ ),
+ ],
+ ),
+ (
+ "MathGlyphConstruction",
+ [
+ (
+ "Offset",
+ "GlyphAssembly",
+ None,
+ None,
+ "Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL",
+ ),
+ (
+ "uint16",
+ "VariantCount",
+ None,
+ None,
+ "Count of glyph growing variants for this glyph.",
+ ),
+ (
+ "MathGlyphVariantRecord",
+ "MathGlyphVariantRecord",
+ "VariantCount",
+ 0,
+ "MathGlyphVariantRecords for alternative variants of the glyphs.",
+ ),
+ ],
+ ),
+ (
+ "MathGlyphVariantRecord",
+ [
+ ("GlyphID", "VariantGlyph", None, None, "Glyph ID for the variant."),
+ (
+ "uint16",
+ "AdvanceMeasurement",
+ None,
+ None,
+ "Advance width/height, in design units, of the variant, in the direction of requested glyph extension.",
+ ),
+ ],
+ ),
+ (
+ "GlyphAssembly",
+ [
+ (
+ "MathValueRecord",
+ "ItalicsCorrection",
+ None,
+ None,
+ "Italics correction of this GlyphAssembly. Should not depend on the assembly size.",
+ ),
+ ("uint16", "PartCount", None, None, "Number of parts in this assembly."),
+ (
+ "GlyphPartRecord",
+ "PartRecords",
+ "PartCount",
+ 0,
+ "Array of part records, from left to right and bottom to top.",
+ ),
+ ],
+ ),
+ (
+ "GlyphPartRecord",
+ [
+ ("GlyphID", "glyph", None, None, "Glyph ID for the part."),
+ (
+ "uint16",
+ "StartConnectorLength",
+ None,
+ None,
+ "Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.",
+ ),
+ (
+ "uint16",
+ "EndConnectorLength",
+ None,
+ None,
+ "Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.",
+ ),
+ (
+ "uint16",
+ "FullAdvance",
+ None,
+ None,
+ "Full advance width/height for this part, in the direction of the extension. In design units.",
+ ),
+ (
+ "uint16",
+ "PartFlags",
+ None,
+ None,
+ "Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 0xFFFE Reserved",
+ ),
+ ],
+ ),
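+ # Illustration only, assuming the standard MATH glyph-assembly rule: adjacent
+ # parts overlap by at least MathVariants.MinConnectorOverlap and by at most
+ # min(EndConnectorLength of the lower/left part, StartConnectorLength of the
+ # upper/right part). E.g. two parts with FullAdvance 1000 each, joined with a
+ # 100-unit overlap, assemble to an advance of 1000 + 1000 - 100 = 1900.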
+ ##
+ ## Apple Advanced Typography (AAT) tables
+ ##
+ (
+ "AATLookupSegment",
+ [
+ ("uint16", "lastGlyph", None, None, "Last glyph index in this segment."),
+ ("uint16", "firstGlyph", None, None, "First glyph index in this segment."),
+ (
+ "uint16",
+ "value",
+ None,
+ None,
+ "A 16-bit offset from the start of the table to the data.",
+ ),
+ ],
+ ),
+ #
+ # ankr
+ #
+ (
+ "ankr",
+ [
+ ("struct", "AnchorPoints", None, None, "Anchor points table."),
+ ],
+ ),
+ (
+ "AnchorPointsFormat0",
+ [
+ ("uint16", "Format", None, None, "Format of the anchor points table, = 0."),
+ ("uint16", "Flags", None, None, "Flags. Currenty unused, set to zero."),
+ (
+ "AATLookupWithDataOffset(AnchorGlyphData)",
+ "Anchors",
+ None,
+ None,
+ "Table of with anchor overrides for each glyph.",
+ ),
+ ],
+ ),
+ (
+ "AnchorGlyphData",
+ [
+ (
+ "uint32",
+ "AnchorPointCount",
+ None,
+ None,
+ "Number of anchor points for this glyph.",
+ ),
+ (
+ "struct",
+ "AnchorPoint",
+ "AnchorPointCount",
+ 0,
+ "Individual anchor points.",
+ ),
+ ],
+ ),
+ (
+ "AnchorPoint",
+ [
+ ("int16", "XCoordinate", None, None, "X coordinate of this anchor point."),
+ ("int16", "YCoordinate", None, None, "Y coordinate of this anchor point."),
+ ],
+ ),
+ #
+ # bsln
+ #
+ (
+ "bsln",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version number of the AAT baseline table (0x00010000 for the initial version).",
+ ),
+ ("struct", "Baseline", None, None, "Baseline table."),
+ ],
+ ),
+ (
+ "BaselineFormat0",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 0."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "uint16",
+ "Delta",
+ 32,
+ 0,
+ "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.",
+ ),
+ ],
+ ),
+ (
+ "BaselineFormat1",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 1."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "uint16",
+ "Delta",
+ 32,
+ 0,
+ "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.",
+ ),
+ (
+ "AATLookup(uint16)",
+ "BaselineValues",
+ None,
+ None,
+ "Lookup table that maps glyphs to their baseline values.",
+ ),
+ ],
+ ),
+ (
+ "BaselineFormat2",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 1."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "GlyphID",
+ "StandardGlyph",
+ None,
+ None,
+ "Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.",
+ ),
+ (
+ "uint16",
+ "ControlPoint",
+ 32,
+ 0,
+ "Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.",
+ ),
+ ],
+ ),
+ (
+ "BaselineFormat3",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 1."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "GlyphID",
+ "StandardGlyph",
+ None,
+ None,
+ "Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.",
+ ),
+ (
+ "uint16",
+ "ControlPoint",
+ 32,
+ 0,
+ "Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.",
+ ),
+ (
+ "AATLookup(uint16)",
+ "BaselineValues",
+ None,
+ None,
+ "Lookup table that maps glyphs to their baseline values.",
+ ),
+ ],
+ ),
+ #
+ # cidg
+ #
+ (
+ "cidg",
+ [
+ ("struct", "CIDGlyphMapping", None, None, "CID-to-glyph mapping table."),
+ ],
+ ),
+ (
+ "CIDGlyphMappingFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the CID-to-glyph mapping table, = 0.",
+ ),
+ ("uint16", "DataFormat", None, None, "Currenty unused, set to zero."),
+ ("uint32", "StructLength", None, None, "Size of the table in bytes."),
+ ("uint16", "Registry", None, None, "The registry ID."),
+ (
+ "char64",
+ "RegistryName",
+ None,
+ None,
+ "The registry name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "Order", None, None, "The order ID."),
+ (
+ "char64",
+ "OrderName",
+ None,
+ None,
+ "The order name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "SupplementVersion", None, None, "The supplement version."),
+ (
+ "CIDGlyphMap",
+ "Mapping",
+ None,
+ None,
+ "A mapping from CIDs to the glyphs in the font, starting with CID 0. If a CID from the identified collection has no glyph in the font, 0xFFFF is used",
+ ),
+ ],
+ ),
+ #
+ # feat
+ #
+ (
+ "feat",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the feat table-initially set to 0x00010000.",
+ ),
+ ("FeatureNames", "FeatureNames", None, None, "The feature names."),
+ ],
+ ),
+ (
+ "FeatureNames",
+ [
+ (
+ "uint16",
+ "FeatureNameCount",
+ None,
+ None,
+ "Number of entries in the feature name array.",
+ ),
+ ("uint16", "Reserved1", None, None, "Reserved (set to zero)."),
+ ("uint32", "Reserved2", None, None, "Reserved (set to zero)."),
+ (
+ "FeatureName",
+ "FeatureName",
+ "FeatureNameCount",
+ 0,
+ "The feature name array.",
+ ),
+ ],
+ ),
+ (
+ "FeatureName",
+ [
+ ("uint16", "FeatureType", None, None, "Feature type."),
+ (
+ "uint16",
+ "SettingsCount",
+ None,
+ None,
+ "The number of records in the setting name array.",
+ ),
+ (
+ "LOffset",
+ "Settings",
+ None,
+ None,
+ "Offset to setting table for this feature.",
+ ),
+ (
+ "uint16",
+ "FeatureFlags",
+ None,
+ None,
+ "Single-bit flags associated with the feature type.",
+ ),
+ (
+ "NameID",
+ "FeatureNameID",
+ None,
+ None,
+ "The name table index for the feature name.",
+ ),
+ ],
+ ),
+ (
+ "Settings",
+ [
+ ("Setting", "Setting", "SettingsCount", 0, "The setting array."),
+ ],
+ ),
+ (
+ "Setting",
+ [
+ ("uint16", "SettingValue", None, None, "The setting."),
+ (
+ "NameID",
+ "SettingNameID",
+ None,
+ None,
+ "The name table index for the setting name.",
+ ),
+ ],
+ ),
+ #
+ # gcid
+ #
+ (
+ "gcid",
+ [
+ ("struct", "GlyphCIDMapping", None, None, "Glyph to CID mapping table."),
+ ],
+ ),
+ (
+ "GlyphCIDMappingFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the glyph-to-CID mapping table, = 0.",
+ ),
+ ("uint16", "DataFormat", None, None, "Currenty unused, set to zero."),
+ ("uint32", "StructLength", None, None, "Size of the table in bytes."),
+ ("uint16", "Registry", None, None, "The registry ID."),
+ (
+ "char64",
+ "RegistryName",
+ None,
+ None,
+ "The registry name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "Order", None, None, "The order ID."),
+ (
+ "char64",
+ "OrderName",
+ None,
+ None,
+ "The order name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "SupplementVersion", None, None, "The supplement version."),
+ (
+ "GlyphCIDMap",
+ "Mapping",
+ None,
+ None,
+ "The CIDs for the glyphs in the font, starting with glyph 0. If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used",
+ ),
+ ],
+ ),
+ #
+ # lcar
+ #
+ (
+ "lcar",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version number of the ligature caret table (0x00010000 for the initial version).",
+ ),
+ ("struct", "LigatureCarets", None, None, "Ligature carets table."),
+ ],
+ ),
+ (
+ "LigatureCaretsFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.",
+ ),
+ (
+ "AATLookup(LigCaretDistances)",
+ "Carets",
+ None,
+ None,
+ "Lookup table associating ligature glyphs with their caret positions, in font unit distances.",
+ ),
+ ],
+ ),
+ (
+ "LigatureCaretsFormat1",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.",
+ ),
+ (
+ "AATLookup(LigCaretPoints)",
+ "Carets",
+ None,
+ None,
+ "Lookup table associating ligature glyphs with their caret positions, as control points.",
+ ),
+ ],
+ ),
+ (
+ "LigCaretDistances",
+ [
+ ("uint16", "DivsionPointCount", None, None, "Number of division points."),
+ (
+ "int16",
+ "DivisionPoint",
+ "DivsionPointCount",
+ 0,
+ "Distance in font units through which a subdivision is made orthogonally to the baseline.",
+ ),
+ ],
+ ),
+ (
+ "LigCaretPoints",
+ [
+ ("uint16", "DivsionPointCount", None, None, "Number of division points."),
+ (
+ "int16",
+ "DivisionPoint",
+ "DivsionPointCount",
+ 0,
+ "The number of the control point through which a subdivision is made orthogonally to the baseline.",
+ ),
+ ],
+ ),
+ #
+ # mort
+ #
+ (
+ "mort",
+ [
+ ("Version", "Version", None, None, "Version of the mort table."),
+ (
+ "uint32",
+ "MorphChainCount",
+ None,
+ None,
+ "Number of metamorphosis chains.",
+ ),
+ (
+ "MortChain",
+ "MorphChain",
+ "MorphChainCount",
+ 0,
+ "Array of metamorphosis chains.",
+ ),
+ ],
+ ),
+ (
+ "MortChain",
+ [
+ (
+ "Flags32",
+ "DefaultFlags",
+ None,
+ None,
+ "The default specification for subtables.",
+ ),
+ (
+ "uint32",
+ "StructLength",
+ None,
+ None,
+ "Total byte count, including this header; must be a multiple of 4.",
+ ),
+ (
+ "uint16",
+ "MorphFeatureCount",
+ None,
+ None,
+ "Number of metamorphosis feature entries.",
+ ),
+ (
+ "uint16",
+ "MorphSubtableCount",
+ None,
+ None,
+ "The number of subtables in the chain.",
+ ),
+ (
+ "struct",
+ "MorphFeature",
+ "MorphFeatureCount",
+ 0,
+ "Array of metamorphosis features.",
+ ),
+ (
+ "MortSubtable",
+ "MorphSubtable",
+ "MorphSubtableCount",
+ 0,
+ "Array of metamorphosis subtables.",
+ ),
+ ],
+ ),
+ (
+ "MortSubtable",
+ [
+ (
+ "uint16",
+ "StructLength",
+ None,
+ None,
+ "Total subtable length, including this header.",
+ ),
+ (
+ "uint8",
+ "CoverageFlags",
+ None,
+ None,
+ "Most significant byte of coverage flags.",
+ ),
+ ("uint8", "MorphType", None, None, "Subtable type."),
+ (
+ "Flags32",
+ "SubFeatureFlags",
+ None,
+ None,
+ "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).",
+ ),
+ ("SubStruct", "SubStruct", None, None, "SubTable."),
+ ],
+ ),
+ #
+ # morx
+ #
+ (
+ "morx",
+ [
+ ("uint16", "Version", None, None, "Version of the morx table."),
+ ("uint16", "Reserved", None, None, "Reserved (set to zero)."),
+ (
+ "uint32",
+ "MorphChainCount",
+ None,
+ None,
+ "Number of extended metamorphosis chains.",
+ ),
+ (
+ "MorxChain",
+ "MorphChain",
+ "MorphChainCount",
+ 0,
+ "Array of extended metamorphosis chains.",
+ ),
+ ],
+ ),
+ (
+ "MorxChain",
+ [
+ (
+ "Flags32",
+ "DefaultFlags",
+ None,
+ None,
+ "The default specification for subtables.",
+ ),
+ (
+ "uint32",
+ "StructLength",
+ None,
+ None,
+ "Total byte count, including this header; must be a multiple of 4.",
+ ),
+ (
+ "uint32",
+ "MorphFeatureCount",
+ None,
+ None,
+ "Number of feature subtable entries.",
+ ),
+ (
+ "uint32",
+ "MorphSubtableCount",
+ None,
+ None,
+ "The number of subtables in the chain.",
+ ),
+ (
+ "MorphFeature",
+ "MorphFeature",
+ "MorphFeatureCount",
+ 0,
+ "Array of metamorphosis features.",
+ ),
+ (
+ "MorxSubtable",
+ "MorphSubtable",
+ "MorphSubtableCount",
+ 0,
+ "Array of extended metamorphosis subtables.",
+ ),
+ ],
+ ),
+ (
+ "MorphFeature",
+ [
+ ("uint16", "FeatureType", None, None, "The type of feature."),
+ (
+ "uint16",
+ "FeatureSetting",
+ None,
+ None,
+ "The feature's setting (aka selector).",
+ ),
+ (
+ "Flags32",
+ "EnableFlags",
+ None,
+ None,
+ "Flags for the settings that this feature and setting enables.",
+ ),
+ (
+ "Flags32",
+ "DisableFlags",
+ None,
+ None,
+ "Complement of flags for the settings that this feature and setting disable.",
+ ),
+ ],
+ ),
+ # Apple TrueType Reference Manual, chapter “The ‘morx’ table”,
+ # section “Metamorphosis Subtables”.
+ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
+ (
+ "MorxSubtable",
+ [
+ (
+ "uint32",
+ "StructLength",
+ None,
+ None,
+ "Total subtable length, including this header.",
+ ),
+ (
+ "uint8",
+ "CoverageFlags",
+ None,
+ None,
+ "Most significant byte of coverage flags.",
+ ),
+ ("uint16", "Reserved", None, None, "Unused."),
+ ("uint8", "MorphType", None, None, "Subtable type."),
+ (
+ "Flags32",
+ "SubFeatureFlags",
+ None,
+ None,
+ "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).",
+ ),
+ ("SubStruct", "SubStruct", None, None, "SubTable."),
+ ],
+ ),
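+ # Illustration only: per the SubFeatureFlags description above, a subtable
+ # with SubFeatureFlags 0x00000004 is executed for a chain whose processed
+ # defaultFlags has bit 2 set, because 0x00000004 & defaultFlags != 0.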
+ (
+ "StateHeader",
+ [
+ (
+ "uint32",
+ "ClassCount",
+ None,
+ None,
+ "Number of classes, which is the number of 16-bit entry indices in a single line in the state array.",
+ ),
+ (
+ "uint32",
+ "MorphClass",
+ None,
+ None,
+ "Offset from the start of this state table header to the start of the class table.",
+ ),
+ (
+ "uint32",
+ "StateArrayOffset",
+ None,
+ None,
+ "Offset from the start of this state table header to the start of the state array.",
+ ),
+ (
+ "uint32",
+ "EntryTableOffset",
+ None,
+ None,
+ "Offset from the start of this state table header to the start of the entry table.",
+ ),
+ ],
+ ),
+ (
+ "RearrangementMorph",
+ [
+ (
+ "STXHeader(RearrangementMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer table for indic rearrangement.",
+ ),
+ ],
+ ),
+ (
+ "ContextualMorph",
+ [
+ (
+ "STXHeader(ContextualMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer for contextual glyph substitution.",
+ ),
+ ],
+ ),
+ (
+ "LigatureMorph",
+ [
+ (
+ "STXHeader(LigatureMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer for ligature substitution.",
+ ),
+ ],
+ ),
+ (
+ "NoncontextualMorph",
+ [
+ (
+ "AATLookup(GlyphID)",
+ "Substitution",
+ None,
+ None,
+ "The noncontextual glyph substitution table.",
+ ),
+ ],
+ ),
+ (
+ "InsertionMorph",
+ [
+ (
+ "STXHeader(InsertionMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer for glyph insertion.",
+ ),
+ ],
+ ),
+ (
+ "MorphClass",
+ [
+ (
+ "uint16",
+ "FirstGlyph",
+ None,
+ None,
+ "Glyph index of the first glyph in the class table.",
+ ),
+ # ('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'),
+ # ('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'),
+ ],
+ ),
+ # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below.
+ # ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
+ #
+ # prop
+ #
+ (
+ "prop",
+ [
+ (
+ "Fixed",
+ "Version",
+ None,
+ None,
+ "Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.",
+ ),
+ ("struct", "GlyphProperties", None, None, "Glyph properties."),
+ ],
+ ),
+ (
+ "GlyphPropertiesFormat0",
+ [
+ ("uint16", "Format", None, None, "Format, = 0."),
+ (
+ "uint16",
+ "DefaultProperties",
+ None,
+ None,
+ "Default properties applied to a glyph. Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.",
+ ),
+ ],
+ ),
+ (
+ "GlyphPropertiesFormat1",
+ [
+ ("uint16", "Format", None, None, "Format, = 1."),
+ (
+ "uint16",
+ "DefaultProperties",
+ None,
+ None,
+ "Default properties applied to a glyph if that glyph is not present in the Properties lookup table.",
+ ),
+ (
+ "AATLookup(uint16)",
+ "Properties",
+ None,
+ None,
+ "Lookup data associating glyphs with their properties.",
+ ),
+ ],
+ ),
+ #
+ # opbd
+ #
+ (
+ "opbd",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version number of the optical bounds table (0x00010000 for the initial version).",
+ ),
+ ("struct", "OpticalBounds", None, None, "Optical bounds table."),
+ ],
+ ),
+ (
+ "OpticalBoundsFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the optical bounds table, = 0.",
+ ),
+ (
+ "AATLookup(OpticalBoundsDeltas)",
+ "OpticalBoundsDeltas",
+ None,
+ None,
+ "Lookup table associating glyphs with their optical bounds, given as deltas in font units.",
+ ),
+ ],
+ ),
+ (
+ "OpticalBoundsFormat1",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the optical bounds table, = 1.",
+ ),
+ (
+ "AATLookup(OpticalBoundsPoints)",
+ "OpticalBoundsPoints",
+ None,
+ None,
+ "Lookup table associating glyphs with their optical bounds, given as references to control points.",
+ ),
+ ],
+ ),
+ (
+ "OpticalBoundsDeltas",
+ [
+ (
+ "int16",
+ "Left",
+ None,
+ None,
+ "Delta value for the left-side optical edge.",
+ ),
+ ("int16", "Top", None, None, "Delta value for the top-side optical edge."),
+ (
+ "int16",
+ "Right",
+ None,
+ None,
+ "Delta value for the right-side optical edge.",
+ ),
+ (
+ "int16",
+ "Bottom",
+ None,
+ None,
+ "Delta value for the bottom-side optical edge.",
+ ),
+ ],
+ ),
+ (
+ "OpticalBoundsPoints",
+ [
+ (
+ "int16",
+ "Left",
+ None,
+ None,
+ "Control point index for the left-side optical edge, or -1 if this glyph has none.",
+ ),
+ (
+ "int16",
+ "Top",
+ None,
+ None,
+ "Control point index for the top-side optical edge, or -1 if this glyph has none.",
+ ),
+ (
+ "int16",
+ "Right",
+ None,
+ None,
+ "Control point index for the right-side optical edge, or -1 if this glyph has none.",
+ ),
+ (
+ "int16",
+ "Bottom",
+ None,
+ None,
+ "Control point index for the bottom-side optical edge, or -1 if this glyph has none.",
+ ),
+ ],
+ ),
+ #
+ # TSIC
+ #
+ (
+ "TSIC",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of table initially set to 0x00010000.",
+ ),
+ ("uint16", "Flags", None, None, "TSIC flags - set to 0"),
+ ("uint16", "AxisCount", None, None, "Axis count from fvar"),
+ ("uint16", "RecordCount", None, None, "TSIC record count"),
+ ("uint16", "Reserved", None, None, "Set to 0"),
+ ("Tag", "AxisArray", "AxisCount", 0, "Array of axis tags in fvar order"),
+ (
+ "LocationRecord",
+ "RecordLocations",
+ "RecordCount",
+ 0,
+ "Location in variation space of TSIC record",
+ ),
+ ("TSICRecord", "Record", "RecordCount", 0, "Array of TSIC records"),
+ ],
+ ),
+ (
+ "LocationRecord",
+ [
+ ("F2Dot14", "Axis", "AxisCount", 0, "Axis record"),
+ ],
+ ),
+ (
+ "TSICRecord",
+ [
+ ("uint16", "Flags", None, None, "Record flags - set to 0"),
+ ("uint16", "NumCVTEntries", None, None, "Number of CVT number value pairs"),
+ ("uint16", "NameLength", None, None, "Length of optional user record name"),
+ ("uint16", "NameArray", "NameLength", 0, "Unicode 16 name"),
+ ("uint16", "CVTArray", "NumCVTEntries", 0, "CVT number array"),
+ ("int16", "CVTValueArray", "NumCVTEntries", 0, "CVT value"),
+ ],
+ ),
+ #
+ # COLR
+ #
+ (
+ "COLR",
+ [
+ ("uint16", "Version", None, None, "Table version number (starts at 0)."),
+ (
+ "uint16",
+ "BaseGlyphRecordCount",
+ None,
+ None,
+ "Number of Base Glyph Records.",
+ ),
+ (
+ "LOffset",
+ "BaseGlyphRecordArray",
+ None,
+ None,
+ "Offset (from beginning of COLR table) to Base Glyph records.",
+ ),
+ (
+ "LOffset",
+ "LayerRecordArray",
+ None,
+ None,
+ "Offset (from beginning of COLR table) to Layer Records.",
+ ),
+ ("uint16", "LayerRecordCount", None, None, "Number of Layer Records."),
+ (
+ "LOffset",
+ "BaseGlyphList",
+ None,
+ "Version >= 1",
+ "Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.",
+ ),
+ (
+ "LOffset",
+ "LayerList",
+ None,
+ "Version >= 1",
+ "Offset (from beginning of COLR table) to LayerList.",
+ ),
+ (
+ "LOffset",
+ "ClipList",
+ None,
+ "Version >= 1",
+ "Offset to ClipList table (may be NULL)",
+ ),
+ (
+ "LOffsetTo(DeltaSetIndexMap)",
+ "VarIndexMap",
+ None,
+ "Version >= 1",
+ "Offset to DeltaSetIndexMap table (may be NULL)",
+ ),
+ (
+ "LOffset",
+ "VarStore",
+ None,
+ "Version >= 1",
+ "Offset to variation store (may be NULL)",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphRecordArray",
+ [
+ (
+ "BaseGlyphRecord",
+ "BaseGlyphRecord",
+ "BaseGlyphRecordCount",
+ 0,
+ "Base Glyph records.",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphRecord",
+ [
+ (
+ "GlyphID",
+ "BaseGlyph",
+ None,
+ None,
+ "Glyph ID of reference glyph. This glyph is for reference only and is not rendered for color.",
+ ),
+ (
+ "uint16",
+ "FirstLayerIndex",
+ None,
+ None,
+ "Index (from beginning of the Layer Records) to the layer record. There will be numLayers consecutive entries for this base glyph.",
+ ),
+ (
+ "uint16",
+ "NumLayers",
+ None,
+ None,
+ "Number of color layers associated with this glyph.",
+ ),
+ ],
+ ),
+ (
+ "LayerRecordArray",
+ [
+ ("LayerRecord", "LayerRecord", "LayerRecordCount", 0, "Layer records."),
+ ],
+ ),
+ (
+ "LayerRecord",
+ [
+ (
+ "GlyphID",
+ "LayerGlyph",
+ None,
+ None,
+ "Glyph ID of layer glyph (must be in z-order from bottom to top).",
+ ),
+ (
+ "uint16",
+ "PaletteIndex",
+ None,
+ None,
+ "Index value to use with a selected color palette.",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphList",
+ [
+ (
+ "uint32",
+ "BaseGlyphCount",
+ None,
+ None,
+ "Number of Version-1 Base Glyph records",
+ ),
+ (
+ "struct",
+ "BaseGlyphPaintRecord",
+ "BaseGlyphCount",
+ 0,
+ "Array of Version-1 Base Glyph records",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphPaintRecord",
+ [
+ ("GlyphID", "BaseGlyph", None, None, "Glyph ID of reference glyph."),
+ (
+ "LOffset",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.",
+ ),
+ ],
+ ),
+ (
+ "LayerList",
+ [
+ ("uint32", "LayerCount", None, None, "Number of Version-1 Layers"),
+ (
+ "LOffset",
+ "Paint",
+ "LayerCount",
+ 0,
+ "Array of offsets to Paint tables, from the start of the LayerList table.",
+ ),
+ ],
+ ),
+ (
+ "ClipListFormat1",
+ [
+ (
+ "uint8",
+ "Format",
+ None,
+ None,
+ "Format for ClipList with 16bit glyph IDs: 1",
+ ),
+ ("uint32", "ClipCount", None, None, "Number of Clip records."),
+ (
+ "struct",
+ "ClipRecord",
+ "ClipCount",
+ 0,
+ "Array of Clip records sorted by glyph ID.",
+ ),
+ ],
+ ),
+ (
+ "ClipRecord",
+ [
+ ("uint16", "StartGlyphID", None, None, "First glyph ID in the range."),
+ ("uint16", "EndGlyphID", None, None, "Last glyph ID in the range."),
+ ("Offset24", "ClipBox", None, None, "Offset to a ClipBox table."),
+ ],
+ ),
+ (
+ "ClipBoxFormat1",
+ [
+ (
+ "uint8",
+ "Format",
+ None,
+ None,
+ "Format for ClipBox without variation: set to 1.",
+ ),
+ ("int16", "xMin", None, None, "Minimum x of clip box."),
+ ("int16", "yMin", None, None, "Minimum y of clip box."),
+ ("int16", "xMax", None, None, "Maximum x of clip box."),
+ ("int16", "yMax", None, None, "Maximum y of clip box."),
+ ],
+ ),
+ (
+ "ClipBoxFormat2",
+ [
+ ("uint8", "Format", None, None, "Format for variable ClipBox: set to 2."),
+ ("int16", "xMin", None, None, "Minimum x of clip box. VarIndexBase + 0."),
+ ("int16", "yMin", None, None, "Minimum y of clip box. VarIndexBase + 1."),
+ ("int16", "xMax", None, None, "Maximum x of clip box. VarIndexBase + 2."),
+ ("int16", "yMax", None, None, "Maximum y of clip box. VarIndexBase + 3."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # COLRv1 Affine2x3 uses the same column-major order to serialize a 2D
+ # Affine Transformation as the one used by fontTools.misc.transform.
+ # However, for historical reasons, the labels 'xy' and 'yx' are swapped.
+ # Their fundamental meaning is the same though.
+ # COLRv1 Affine2x3 follows the names found in FreeType and Cairo.
+ # In all case, the second element in the 6-tuple correspond to the
+ # y-part of the x basis vector, and the third to the x-part of the y
+ # basis vector.
+ # See https://github.com/googlefonts/colr-gradients-spec/pull/85
+ (
+ "Affine2x3",
+ [
+ ("Fixed", "xx", None, None, "x-part of x basis vector"),
+ ("Fixed", "yx", None, None, "y-part of x basis vector"),
+ ("Fixed", "xy", None, None, "x-part of y basis vector"),
+ ("Fixed", "yy", None, None, "y-part of y basis vector"),
+ ("Fixed", "dx", None, None, "Translation in x direction"),
+ ("Fixed", "dy", None, None, "Translation in y direction"),
+ ],
+ ),
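+ # Illustration only: following the note above, a
+ # fontTools.misc.transform.Transform(xx, xy, yx, yy, dx, dy) maps onto
+ # Affine2x3 positionally, with only the 'xy'/'yx' labels swapped:
+ #
+ #     Affine2x3.xx = t[0]  # x-part of x basis vector
+ #     Affine2x3.yx = t[1]  # y-part of x basis vector ('xy' in fontTools)
+ #     Affine2x3.xy = t[2]  # x-part of y basis vector ('yx' in fontTools)
+ #     Affine2x3.yy = t[3]
+ #     Affine2x3.dx = t[4]
+ #     Affine2x3.dy = t[5]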
+ (
+ "VarAffine2x3",
+ [
+ ("Fixed", "xx", None, None, "x-part of x basis vector. VarIndexBase + 0."),
+ ("Fixed", "yx", None, None, "y-part of x basis vector. VarIndexBase + 1."),
+ ("Fixed", "xy", None, None, "x-part of y basis vector. VarIndexBase + 2."),
+ ("Fixed", "yy", None, None, "y-part of y basis vector. VarIndexBase + 3."),
+ (
+ "Fixed",
+ "dx",
+ None,
+ None,
+ "Translation in x direction. VarIndexBase + 4.",
+ ),
+ (
+ "Fixed",
+ "dy",
+ None,
+ None,
+ "Translation in y direction. VarIndexBase + 5.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ (
+ "ColorStop",
+ [
+ ("F2Dot14", "StopOffset", None, None, ""),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"),
+ ],
+ ),
+ (
+ "VarColorStop",
+ [
+ ("F2Dot14", "StopOffset", None, None, "VarIndexBase + 0."),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ (
+ "F2Dot14",
+ "Alpha",
+ None,
+ None,
+ "Values outsided [0.,1.] reserved. VarIndexBase + 1.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ (
+ "ColorLine",
+ [
+ (
+ "ExtendMode",
+ "Extend",
+ None,
+ None,
+ "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}",
+ ),
+ ("uint16", "StopCount", None, None, "Number of Color stops."),
+ ("ColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."),
+ ],
+ ),
+ (
+ "VarColorLine",
+ [
+ (
+ "ExtendMode",
+ "Extend",
+ None,
+ None,
+ "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}",
+ ),
+ ("uint16", "StopCount", None, None, "Number of Color stops."),
+ ("VarColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."),
+ ],
+ ),
+ # PaintColrLayers
+ (
+ "PaintFormat1",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 1"),
+ (
+ "uint8",
+ "NumLayers",
+ None,
+ None,
+ "Number of offsets to Paint to read from LayerList.",
+ ),
+ ("uint32", "FirstLayerIndex", None, None, "Index into LayerList."),
+ ],
+ ),
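+ # Illustration only: PaintColrLayers selects a slice of the LayerList, i.e.
+ # its child paints are LayerList.Paint[FirstLayerIndex : FirstLayerIndex +
+ # NumLayers], composited bottom to top.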
+ # PaintSolid
+ (
+ "PaintFormat2",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"),
+ ],
+ ),
+ # PaintVarSolid
+ (
+ "PaintFormat3",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 3"),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ (
+ "F2Dot14",
+ "Alpha",
+ None,
+ None,
+ "Values outsided [0.,1.] reserved. VarIndexBase + 0.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintLinearGradient
+ (
+ "PaintFormat4",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 4"),
+ (
+ "Offset24",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintLinearGradient table) to ColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, ""),
+ ("int16", "y0", None, None, ""),
+ ("int16", "x1", None, None, ""),
+ ("int16", "y1", None, None, ""),
+ ("int16", "x2", None, None, ""),
+ ("int16", "y2", None, None, ""),
+ ],
+ ),
+ # PaintVarLinearGradient
+ (
+ "PaintFormat5",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 5"),
+ (
+ "LOffset24To(VarColorLine)",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, "VarIndexBase + 0."),
+ ("int16", "y0", None, None, "VarIndexBase + 1."),
+ ("int16", "x1", None, None, "VarIndexBase + 2."),
+ ("int16", "y1", None, None, "VarIndexBase + 3."),
+ ("int16", "x2", None, None, "VarIndexBase + 4."),
+ ("int16", "y2", None, None, "VarIndexBase + 5."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintRadialGradient
+ (
+ "PaintFormat6",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 6"),
+ (
+ "Offset24",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintRadialGradient table) to ColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, ""),
+ ("int16", "y0", None, None, ""),
+ ("uint16", "r0", None, None, ""),
+ ("int16", "x1", None, None, ""),
+ ("int16", "y1", None, None, ""),
+ ("uint16", "r1", None, None, ""),
+ ],
+ ),
+ # PaintVarRadialGradient
+ (
+ "PaintFormat7",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 7"),
+ (
+ "LOffset24To(VarColorLine)",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, "VarIndexBase + 0."),
+ ("int16", "y0", None, None, "VarIndexBase + 1."),
+ ("uint16", "r0", None, None, "VarIndexBase + 2."),
+ ("int16", "x1", None, None, "VarIndexBase + 3."),
+ ("int16", "y1", None, None, "VarIndexBase + 4."),
+ ("uint16", "r1", None, None, "VarIndexBase + 5."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintSweepGradient
+ (
+ "PaintFormat8",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 8"),
+ (
+ "Offset24",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.",
+ ),
+ ("int16", "centerX", None, None, "Center x coordinate."),
+ ("int16", "centerY", None, None, "Center y coordinate."),
+ (
+ "BiasedAngle",
+ "startAngle",
+ None,
+ None,
+ "Start of the angular range of the gradient.",
+ ),
+ (
+ "BiasedAngle",
+ "endAngle",
+ None,
+ None,
+ "End of the angular range of the gradient.",
+ ),
+ ],
+ ),
+ # PaintVarSweepGradient
+ (
+ "PaintFormat9",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 9"),
+ (
+ "LOffset24To(VarColorLine)",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.",
+ ),
+ ("int16", "centerX", None, None, "Center x coordinate. VarIndexBase + 0."),
+ ("int16", "centerY", None, None, "Center y coordinate. VarIndexBase + 1."),
+ (
+ "BiasedAngle",
+ "startAngle",
+ None,
+ None,
+ "Start of the angular range of the gradient. VarIndexBase + 2.",
+ ),
+ (
+ "BiasedAngle",
+ "endAngle",
+ None,
+ None,
+ "End of the angular range of the gradient. VarIndexBase + 3.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintGlyph
+ (
+ "PaintFormat10",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 10"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintGlyph table) to Paint subtable.",
+ ),
+ ("GlyphID", "Glyph", None, None, "Glyph ID for the source outline."),
+ ],
+ ),
+ # PaintColrGlyph
+ (
+ "PaintFormat11",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 11"),
+ (
+ "GlyphID",
+ "Glyph",
+ None,
+ None,
+ "Virtual glyph ID for a BaseGlyphList base glyph.",
+ ),
+ ],
+ ),
+ # PaintTransform
+ (
+ "PaintFormat12",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 12"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintTransform table) to Paint subtable.",
+ ),
+ (
+ "LOffset24To(Affine2x3)",
+ "Transform",
+ None,
+ None,
+ "2x3 matrix for 2D affine transformations.",
+ ),
+ ],
+ ),
+ # PaintVarTransform
+ (
+ "PaintFormat13",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 13"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarTransform table) to Paint subtable.",
+ ),
+ (
+ "LOffset24To(VarAffine2x3)",
+ "Transform",
+ None,
+ None,
+ "2x3 matrix for 2D affine transformations.",
+ ),
+ ],
+ ),
+ # PaintTranslate
+ (
+ "PaintFormat14",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 14"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintTranslate table) to Paint subtable.",
+ ),
+ ("int16", "dx", None, None, "Translation in x direction."),
+ ("int16", "dy", None, None, "Translation in y direction."),
+ ],
+ ),
+ # PaintVarTranslate
+ (
+ "PaintFormat15",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 15"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarTranslate table) to Paint subtable.",
+ ),
+ (
+ "int16",
+ "dx",
+ None,
+ None,
+ "Translation in x direction. VarIndexBase + 0.",
+ ),
+ (
+ "int16",
+ "dy",
+ None,
+ None,
+ "Translation in y direction. VarIndexBase + 1.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScale
+ (
+ "PaintFormat16",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 16"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScale table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, ""),
+ ("F2Dot14", "scaleY", None, None, ""),
+ ],
+ ),
+ # PaintVarScale
+ (
+ "PaintFormat17",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 17"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScale table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."),
+ ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScaleAroundCenter
+ (
+ "PaintFormat18",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 18"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, ""),
+ ("F2Dot14", "scaleY", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarScaleAroundCenter
+ (
+ "PaintFormat19",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 19"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScaleAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."),
+ ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."),
+ ("int16", "centerX", None, None, "VarIndexBase + 2."),
+ ("int16", "centerY", None, None, "VarIndexBase + 3."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScaleUniform
+ (
+ "PaintFormat20",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 20"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScaleUniform table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, ""),
+ ],
+ ),
+ # PaintVarScaleUniform
+ (
+ "PaintFormat21",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 21"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, "VarIndexBase + 0."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScaleUniformAroundCenter
+ (
+ "PaintFormat22",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 22"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarScaleUniformAroundCenter
+ (
+ "PaintFormat23",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 23"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, "VarIndexBase + 0"),
+ ("int16", "centerX", None, None, "VarIndexBase + 1"),
+ ("int16", "centerY", None, None, "VarIndexBase + 2"),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintRotate
+ (
+ "PaintFormat24",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 24"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintRotate table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, ""),
+ ],
+ ),
+ # PaintVarRotate
+ (
+ "PaintFormat25",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 25"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarRotate table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, "VarIndexBase + 0."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintRotateAroundCenter
+ (
+ "PaintFormat26",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 26"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarRotateAroundCenter
+ (
+ "PaintFormat27",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 27"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, "VarIndexBase + 0."),
+ ("int16", "centerX", None, None, "VarIndexBase + 1."),
+ ("int16", "centerY", None, None, "VarIndexBase + 2."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintSkew
+ (
+ "PaintFormat28",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 28"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintSkew table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, ""),
+ ("Angle", "ySkewAngle", None, None, ""),
+ ],
+ ),
+ # PaintVarSkew
+ (
+ "PaintFormat29",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 29"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarSkew table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."),
+ ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintSkewAroundCenter
+ (
+ "PaintFormat30",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 30"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, ""),
+ ("Angle", "ySkewAngle", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarSkewAroundCenter
+ (
+ "PaintFormat31",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 31"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."),
+ ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."),
+ ("int16", "centerX", None, None, "VarIndexBase + 2."),
+ ("int16", "centerY", None, None, "VarIndexBase + 3."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintComposite
+ (
+ "PaintFormat32",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 32"),
+ (
+ "LOffset24To(Paint)",
+ "SourcePaint",
+ None,
+ None,
+ "Offset (from beginning of PaintComposite table) to source Paint subtable.",
+ ),
+ (
+ "CompositeMode",
+ "CompositeMode",
+ None,
+ None,
+ "A CompositeMode enumeration value.",
+ ),
+ (
+ "LOffset24To(Paint)",
+ "BackdropPaint",
+ None,
+ None,
+ "Offset (from beginning of PaintComposite table) to backdrop Paint subtable.",
+ ),
+ ],
+ ),
+ #
+ # avar
+ #
+ (
+ "AxisValueMap",
+ [
+ (
+ "F2Dot14",
+ "FromCoordinate",
+ None,
+ None,
+ "A normalized coordinate value obtained using default normalization",
+ ),
+ (
+ "F2Dot14",
+ "ToCoordinate",
+ None,
+ None,
+ "The modified, normalized coordinate value",
+ ),
+ ],
+ ),
+ (
+ "AxisSegmentMap",
+ [
+ (
+ "uint16",
+ "PositionMapCount",
+ None,
+ None,
+ "The number of correspondence pairs for this axis",
+ ),
+ (
+ "AxisValueMap",
+ "AxisValueMap",
+ "PositionMapCount",
+ 0,
+ "The array of axis value map records for this axis",
+ ),
+ ],
+ ),
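+ # Illustration only: avar applies each AxisSegmentMap as a piecewise-linear
+ # function. With correspondence pairs (-1.0, -1.0), (0.0, 0.0), (0.5, 0.75),
+ # (1.0, 1.0), a default-normalized value of 0.25 falls halfway between the
+ # 0.0 and 0.5 "from" coordinates and so maps to 0.0 + 0.5 * (0.75 - 0.0) = 0.375.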
+ (
+ "avar",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the avar table- 0x00010000 or 0x00020000",
+ ),
+ ("uint16", "Reserved", None, None, "Permanently reserved; set to zero"),
+ (
+ "uint16",
+ "AxisCount",
+ None,
+ None,
+ 'The number of variation axes for this font. This must be the same number as axisCount in the "fvar" table',
+ ),
+ (
+ "AxisSegmentMap",
+ "AxisSegmentMap",
+ "AxisCount",
+ 0,
+ 'The segment maps array — one segment map for each axis, in the order of axes specified in the "fvar" table',
+ ),
+ (
+ "LOffsetTo(DeltaSetIndexMap)",
+ "VarIdxMap",
+ None,
+ "Version >= 0x00020000",
+ "",
+ ),
+ ("LOffset", "VarStore", None, "Version >= 0x00020000", ""),
+ ],
+ ),
]
diff --git a/Lib/fontTools/ttLib/tables/otTables.py b/Lib/fontTools/ttLib/tables/otTables.py
index 6e7f3dfb..262f8d41 100644
--- a/Lib/fontTools/ttLib/tables/otTables.py
+++ b/Lib/fontTools/ttLib/tables/otTables.py
@@ -7,597 +7,623 @@ converter objects from otConverters.py.
"""
import copy
from enum import IntEnum
+from functools import reduce
+from math import radians
import itertools
from collections import defaultdict, namedtuple
+from fontTools.ttLib.tables.otTraverse import dfs_base_table
+from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
+from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
+from fontTools.pens.boundsPen import ControlBoundsPen
+from fontTools.pens.transformPen import TransformPen
from .otBase import (
- BaseTable, FormatSwitchingBaseTable, ValueRecord, CountReference,
- getFormatSwitchingBaseTableClass,
+ BaseTable,
+ FormatSwitchingBaseTable,
+ ValueRecord,
+ CountReference,
+ getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
+from typing import TYPE_CHECKING, Iterator, List, Optional, Set
+
+if TYPE_CHECKING:
+ from fontTools.ttLib.ttGlyphSet import _TTGlyphSet
log = logging.getLogger(__name__)
class AATStateTable(object):
- def __init__(self):
- self.GlyphClasses = {} # GlyphID --> GlyphClass
- self.States = [] # List of AATState, indexed by state number
- self.PerGlyphLookups = [] # [{GlyphID:GlyphID}, ...]
+ def __init__(self):
+ self.GlyphClasses = {} # GlyphID --> GlyphClass
+ self.States = [] # List of AATState, indexed by state number
+ self.PerGlyphLookups = [] # [{GlyphID:GlyphID}, ...]
class AATState(object):
- def __init__(self):
- self.Transitions = {} # GlyphClass --> AATAction
+ def __init__(self):
+ self.Transitions = {} # GlyphClass --> AATAction
class AATAction(object):
- _FLAGS = None
+ _FLAGS = None
- @staticmethod
- def compileActions(font, states):
- return (None, None)
+ @staticmethod
+ def compileActions(font, states):
+ return (None, None)
- def _writeFlagsToXML(self, xmlWriter):
- flags = [f for f in self._FLAGS if self.__dict__[f]]
- if flags:
- xmlWriter.simpletag("Flags", value=",".join(flags))
- xmlWriter.newline()
- if self.ReservedFlags != 0:
- xmlWriter.simpletag(
- "ReservedFlags",
- value='0x%04X' % self.ReservedFlags)
- xmlWriter.newline()
+ def _writeFlagsToXML(self, xmlWriter):
+ flags = [f for f in self._FLAGS if self.__dict__[f]]
+ if flags:
+ xmlWriter.simpletag("Flags", value=",".join(flags))
+ xmlWriter.newline()
+ if self.ReservedFlags != 0:
+ xmlWriter.simpletag("ReservedFlags", value="0x%04X" % self.ReservedFlags)
+ xmlWriter.newline()
- def _setFlag(self, flag):
- assert flag in self._FLAGS, "unsupported flag %s" % flag
- self.__dict__[flag] = True
+ def _setFlag(self, flag):
+ assert flag in self._FLAGS, "unsupported flag %s" % flag
+ self.__dict__[flag] = True
class RearrangementMorphAction(AATAction):
- staticSize = 4
- actionHeaderSize = 0
- _FLAGS = ["MarkFirst", "DontAdvance", "MarkLast"]
-
- _VERBS = {
- 0: "no change",
- 1: "Ax ⇒ xA",
- 2: "xD ⇒ Dx",
- 3: "AxD ⇒ DxA",
- 4: "ABx ⇒ xAB",
- 5: "ABx ⇒ xBA",
- 6: "xCD ⇒ CDx",
- 7: "xCD ⇒ DCx",
- 8: "AxCD ⇒ CDxA",
- 9: "AxCD ⇒ DCxA",
- 10: "ABxD ⇒ DxAB",
- 11: "ABxD ⇒ DxBA",
- 12: "ABxCD ⇒ CDxAB",
- 13: "ABxCD ⇒ CDxBA",
- 14: "ABxCD ⇒ DCxAB",
- 15: "ABxCD ⇒ DCxBA",
- }
-
- def __init__(self):
- self.NewState = 0
- self.Verb = 0
- self.MarkFirst = False
- self.DontAdvance = False
- self.MarkLast = False
- self.ReservedFlags = 0
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is None
- writer.writeUShort(self.NewState)
- assert self.Verb >= 0 and self.Verb <= 15, self.Verb
- flags = self.Verb | self.ReservedFlags
- if self.MarkFirst: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- if self.MarkLast: flags |= 0x2000
- writer.writeUShort(flags)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.Verb = flags & 0xF
- self.MarkFirst = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- self.MarkLast = bool(flags & 0x2000)
- self.ReservedFlags = flags & 0x1FF0
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- xmlWriter.simpletag("Verb", value=self.Verb)
- verbComment = self._VERBS.get(self.Verb)
- if verbComment is not None:
- xmlWriter.comment(verbComment)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- self.NewState = self.Verb = self.ReservedFlags = 0
- self.MarkFirst = self.DontAdvance = self.MarkLast = False
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Verb":
- self.Verb = safeEval(eltAttrs["value"])
- elif eltName == "ReservedFlags":
- self.ReservedFlags = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
+ staticSize = 4
+ actionHeaderSize = 0
+ _FLAGS = ["MarkFirst", "DontAdvance", "MarkLast"]
+
+ _VERBS = {
+ 0: "no change",
+ 1: "Ax ⇒ xA",
+ 2: "xD ⇒ Dx",
+ 3: "AxD ⇒ DxA",
+ 4: "ABx ⇒ xAB",
+ 5: "ABx ⇒ xBA",
+ 6: "xCD ⇒ CDx",
+ 7: "xCD ⇒ DCx",
+ 8: "AxCD ⇒ CDxA",
+ 9: "AxCD ⇒ DCxA",
+ 10: "ABxD ⇒ DxAB",
+ 11: "ABxD ⇒ DxBA",
+ 12: "ABxCD ⇒ CDxAB",
+ 13: "ABxCD ⇒ CDxBA",
+ 14: "ABxCD ⇒ DCxAB",
+ 15: "ABxCD ⇒ DCxBA",
+ }
+
+ def __init__(self):
+ self.NewState = 0
+ self.Verb = 0
+ self.MarkFirst = False
+ self.DontAdvance = False
+ self.MarkLast = False
+ self.ReservedFlags = 0
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is None
+ writer.writeUShort(self.NewState)
+ assert self.Verb >= 0 and self.Verb <= 15, self.Verb
+ flags = self.Verb | self.ReservedFlags
+ if self.MarkFirst:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ if self.MarkLast:
+ flags |= 0x2000
+ writer.writeUShort(flags)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.Verb = flags & 0xF
+ self.MarkFirst = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ self.MarkLast = bool(flags & 0x2000)
+ self.ReservedFlags = flags & 0x1FF0
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ xmlWriter.simpletag("Verb", value=self.Verb)
+ verbComment = self._VERBS.get(self.Verb)
+ if verbComment is not None:
+ xmlWriter.comment(verbComment)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ self.NewState = self.Verb = self.ReservedFlags = 0
+ self.MarkFirst = self.DontAdvance = self.MarkLast = False
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Verb":
+ self.Verb = safeEval(eltAttrs["value"])
+ elif eltName == "ReservedFlags":
+ self.ReservedFlags = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
class ContextualMorphAction(AATAction):
- staticSize = 8
- actionHeaderSize = 0
- _FLAGS = ["SetMark", "DontAdvance"]
-
- def __init__(self):
- self.NewState = 0
- self.SetMark, self.DontAdvance = False, False
- self.ReservedFlags = 0
- self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is None
- writer.writeUShort(self.NewState)
- flags = self.ReservedFlags
- if self.SetMark: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- writer.writeUShort(flags)
- writer.writeUShort(self.MarkIndex)
- writer.writeUShort(self.CurrentIndex)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.SetMark = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- self.ReservedFlags = flags & 0x3FFF
- self.MarkIndex = reader.readUShort()
- self.CurrentIndex = reader.readUShort()
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- xmlWriter.simpletag("MarkIndex", value=self.MarkIndex)
- xmlWriter.newline()
- xmlWriter.simpletag("CurrentIndex",
- value=self.CurrentIndex)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- self.NewState = self.ReservedFlags = 0
- self.SetMark = self.DontAdvance = False
- self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
- elif eltName == "ReservedFlags":
- self.ReservedFlags = safeEval(eltAttrs["value"])
- elif eltName == "MarkIndex":
- self.MarkIndex = safeEval(eltAttrs["value"])
- elif eltName == "CurrentIndex":
- self.CurrentIndex = safeEval(eltAttrs["value"])
+ staticSize = 8
+ actionHeaderSize = 0
+ _FLAGS = ["SetMark", "DontAdvance"]
+
+ def __init__(self):
+ self.NewState = 0
+ self.SetMark, self.DontAdvance = False, False
+ self.ReservedFlags = 0
+ self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is None
+ writer.writeUShort(self.NewState)
+ flags = self.ReservedFlags
+ if self.SetMark:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ writer.writeUShort(flags)
+ writer.writeUShort(self.MarkIndex)
+ writer.writeUShort(self.CurrentIndex)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.SetMark = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ self.ReservedFlags = flags & 0x3FFF
+ self.MarkIndex = reader.readUShort()
+ self.CurrentIndex = reader.readUShort()
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ xmlWriter.simpletag("MarkIndex", value=self.MarkIndex)
+ xmlWriter.newline()
+ xmlWriter.simpletag("CurrentIndex", value=self.CurrentIndex)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ self.NewState = self.ReservedFlags = 0
+ self.SetMark = self.DontAdvance = False
+ self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
+ elif eltName == "ReservedFlags":
+ self.ReservedFlags = safeEval(eltAttrs["value"])
+ elif eltName == "MarkIndex":
+ self.MarkIndex = safeEval(eltAttrs["value"])
+ elif eltName == "CurrentIndex":
+ self.CurrentIndex = safeEval(eltAttrs["value"])
class LigAction(object):
- def __init__(self):
- self.Store = False
- # GlyphIndexDelta is a (possibly negative) delta that gets
- # added to the glyph ID at the top of the AAT runtime
- # execution stack. It is *not* a byte offset into the
- # morx table. The result of the addition, which is performed
- # at run time by the shaping engine, is an index into
- # the ligature components table. See 'morx' specification.
- # In the AAT specification, this field is called Offset;
- # but its meaning is quite different from other offsets
- # in either AAT or OpenType, so we use a different name.
- self.GlyphIndexDelta = 0
+ def __init__(self):
+ self.Store = False
+ # GlyphIndexDelta is a (possibly negative) delta that gets
+ # added to the glyph ID at the top of the AAT runtime
+ # execution stack. It is *not* a byte offset into the
+ # morx table. The result of the addition, which is performed
+ # at run time by the shaping engine, is an index into
+ # the ligature components table. See 'morx' specification.
+ # In the AAT specification, this field is called Offset;
+ # but its meaning is quite different from other offsets
+ # in either AAT or OpenType, so we use a different name.
+ self.GlyphIndexDelta = 0
class LigatureMorphAction(AATAction):
- staticSize = 6
-
- # 4 bytes for each of {action,ligComponents,ligatures}Offset
- actionHeaderSize = 12
-
- _FLAGS = ["SetComponent", "DontAdvance"]
-
- def __init__(self):
- self.NewState = 0
- self.SetComponent, self.DontAdvance = False, False
- self.ReservedFlags = 0
- self.Actions = []
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is not None
- writer.writeUShort(self.NewState)
- flags = self.ReservedFlags
- if self.SetComponent: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- if len(self.Actions) > 0: flags |= 0x2000
- writer.writeUShort(flags)
- if len(self.Actions) > 0:
- actions = self.compileLigActions()
- writer.writeUShort(actionIndex[actions])
- else:
- writer.writeUShort(0)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is not None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.SetComponent = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- performAction = bool(flags & 0x2000)
- # As of 2017-09-12, the 'morx' specification says that
- # the reserved bitmask in ligature subtables is 0x3FFF.
- # However, the specification also defines a flag 0x2000,
- # so the reserved value should actually be 0x1FFF.
- # TODO: Report this specification bug to Apple.
- self.ReservedFlags = flags & 0x1FFF
- actionIndex = reader.readUShort()
- if performAction:
- self.Actions = self._decompileLigActions(
- actionReader, actionIndex)
- else:
- self.Actions = []
-
- @staticmethod
- def compileActions(font, states):
- result, actions, actionIndex = b"", set(), {}
- for state in states:
- for _glyphClass, trans in state.Transitions.items():
- actions.add(trans.compileLigActions())
- # Sort the compiled actions in decreasing order of
- # length, so that the longer sequence come before the
- # shorter ones. For each compiled action ABCD, its
- # suffixes BCD, CD, and D do not be encoded separately
- # (in case they occur); instead, we can just store an
- # index that points into the middle of the longer
- # sequence. Every compiled AAT ligature sequence is
- # terminated with an end-of-sequence flag, which can
- # only be set on the last element of the sequence.
- # Therefore, it is sufficient to consider just the
- # suffixes.
- for a in sorted(actions, key=lambda x:(-len(x), x)):
- if a not in actionIndex:
- for i in range(0, len(a), 4):
- suffix = a[i:]
- suffixIndex = (len(result) + i) // 4
- actionIndex.setdefault(
- suffix, suffixIndex)
- result += a
- result = pad(result, 4)
- return (result, actionIndex)
-
- def compileLigActions(self):
- result = []
- for i, action in enumerate(self.Actions):
- last = (i == len(self.Actions) - 1)
- value = action.GlyphIndexDelta & 0x3FFFFFFF
- value |= 0x80000000 if last else 0
- value |= 0x40000000 if action.Store else 0
- result.append(struct.pack(">L", value))
- return bytesjoin(result)
-
- def _decompileLigActions(self, actionReader, actionIndex):
- actions = []
- last = False
- reader = actionReader.getSubReader(
- actionReader.pos + actionIndex * 4)
- while not last:
- value = reader.readULong()
- last = bool(value & 0x80000000)
- action = LigAction()
- actions.append(action)
- action.Store = bool(value & 0x40000000)
- delta = value & 0x3FFFFFFF
- if delta >= 0x20000000: # sign-extend 30-bit value
- delta = -0x40000000 + delta
- action.GlyphIndexDelta = delta
- return actions
-
- def fromXML(self, name, attrs, content, font):
- self.NewState = self.ReservedFlags = 0
- self.SetComponent = self.DontAdvance = False
- self.ReservedFlags = 0
- self.Actions = []
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
- elif eltName == "ReservedFlags":
- self.ReservedFlags = safeEval(eltAttrs["value"])
- elif eltName == "Action":
- action = LigAction()
- flags = eltAttrs.get("Flags", "").split(",")
- flags = [f.strip() for f in flags]
- action.Store = "Store" in flags
- action.GlyphIndexDelta = safeEval(
- eltAttrs["GlyphIndexDelta"])
- self.Actions.append(action)
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- for action in self.Actions:
- attribs = [("GlyphIndexDelta", action.GlyphIndexDelta)]
- if action.Store:
- attribs.append(("Flags", "Store"))
- xmlWriter.simpletag("Action", attribs)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ staticSize = 6
+
+ # 4 bytes for each of {action,ligComponents,ligatures}Offset
+ actionHeaderSize = 12
+
+ _FLAGS = ["SetComponent", "DontAdvance"]
+
+ def __init__(self):
+ self.NewState = 0
+ self.SetComponent, self.DontAdvance = False, False
+ self.ReservedFlags = 0
+ self.Actions = []
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is not None
+ writer.writeUShort(self.NewState)
+ flags = self.ReservedFlags
+ if self.SetComponent:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ if len(self.Actions) > 0:
+ flags |= 0x2000
+ writer.writeUShort(flags)
+ if len(self.Actions) > 0:
+ actions = self.compileLigActions()
+ writer.writeUShort(actionIndex[actions])
+ else:
+ writer.writeUShort(0)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is not None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.SetComponent = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ performAction = bool(flags & 0x2000)
+ # As of 2017-09-12, the 'morx' specification says that
+ # the reserved bitmask in ligature subtables is 0x3FFF.
+ # However, the specification also defines a flag 0x2000,
+ # so the reserved value should actually be 0x1FFF.
+ # TODO: Report this specification bug to Apple.
+ self.ReservedFlags = flags & 0x1FFF
+ actionIndex = reader.readUShort()
+ if performAction:
+ self.Actions = self._decompileLigActions(actionReader, actionIndex)
+ else:
+ self.Actions = []
+
+ @staticmethod
+ def compileActions(font, states):
+ result, actions, actionIndex = b"", set(), {}
+ for state in states:
+ for _glyphClass, trans in state.Transitions.items():
+ actions.add(trans.compileLigActions())
+ # Sort the compiled actions in decreasing order of
+        # length, so that the longer sequences come before the
+        # shorter ones. For each compiled action ABCD, its
+        # suffixes BCD, CD, and D do not need to be encoded separately
+ # (in case they occur); instead, we can just store an
+ # index that points into the middle of the longer
+ # sequence. Every compiled AAT ligature sequence is
+ # terminated with an end-of-sequence flag, which can
+ # only be set on the last element of the sequence.
+ # Therefore, it is sufficient to consider just the
+ # suffixes.
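+        # For example, once the compiled action ABCD has been appended
+        # to the pool (one letter per 4-byte action), a shorter action
+        # CD that equals its suffix is not appended again; actionIndex
+        # simply records the position of CD inside ABCD.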
+ for a in sorted(actions, key=lambda x: (-len(x), x)):
+ if a not in actionIndex:
+ for i in range(0, len(a), 4):
+ suffix = a[i:]
+ suffixIndex = (len(result) + i) // 4
+ actionIndex.setdefault(suffix, suffixIndex)
+ result += a
+ result = pad(result, 4)
+ return (result, actionIndex)
+
+ def compileLigActions(self):
+ result = []
+ for i, action in enumerate(self.Actions):
+ last = i == len(self.Actions) - 1
+ value = action.GlyphIndexDelta & 0x3FFFFFFF
+ value |= 0x80000000 if last else 0
+ value |= 0x40000000 if action.Store else 0
+ result.append(struct.pack(">L", value))
+ return bytesjoin(result)
+
+ def _decompileLigActions(self, actionReader, actionIndex):
+ actions = []
+ last = False
+ reader = actionReader.getSubReader(actionReader.pos + actionIndex * 4)
+ while not last:
+ value = reader.readULong()
+ last = bool(value & 0x80000000)
+ action = LigAction()
+ actions.append(action)
+ action.Store = bool(value & 0x40000000)
+ delta = value & 0x3FFFFFFF
+ if delta >= 0x20000000: # sign-extend 30-bit value
+ delta = -0x40000000 + delta
+ action.GlyphIndexDelta = delta
+ return actions
+
+ def fromXML(self, name, attrs, content, font):
+ self.NewState = self.ReservedFlags = 0
+ self.SetComponent = self.DontAdvance = False
+ self.ReservedFlags = 0
+ self.Actions = []
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
+ elif eltName == "ReservedFlags":
+ self.ReservedFlags = safeEval(eltAttrs["value"])
+ elif eltName == "Action":
+ action = LigAction()
+ flags = eltAttrs.get("Flags", "").split(",")
+ flags = [f.strip() for f in flags]
+ action.Store = "Store" in flags
+ action.GlyphIndexDelta = safeEval(eltAttrs["GlyphIndexDelta"])
+ self.Actions.append(action)
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ for action in self.Actions:
+ attribs = [("GlyphIndexDelta", action.GlyphIndexDelta)]
+ if action.Store:
+ attribs.append(("Flags", "Store"))
+ xmlWriter.simpletag("Action", attribs)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
class InsertionMorphAction(AATAction):
- staticSize = 8
- actionHeaderSize = 4 # 4 bytes for actionOffset
- _FLAGS = ["SetMark", "DontAdvance",
- "CurrentIsKashidaLike", "MarkedIsKashidaLike",
- "CurrentInsertBefore", "MarkedInsertBefore"]
-
- def __init__(self):
- self.NewState = 0
- for flag in self._FLAGS:
- setattr(self, flag, False)
- self.ReservedFlags = 0
- self.CurrentInsertionAction, self.MarkedInsertionAction = [], []
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is not None
- writer.writeUShort(self.NewState)
- flags = self.ReservedFlags
- if self.SetMark: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- if self.CurrentIsKashidaLike: flags |= 0x2000
- if self.MarkedIsKashidaLike: flags |= 0x1000
- if self.CurrentInsertBefore: flags |= 0x0800
- if self.MarkedInsertBefore: flags |= 0x0400
- flags |= len(self.CurrentInsertionAction) << 5
- flags |= len(self.MarkedInsertionAction)
- writer.writeUShort(flags)
- if len(self.CurrentInsertionAction) > 0:
- currentIndex = actionIndex[
- tuple(self.CurrentInsertionAction)]
- else:
- currentIndex = 0xFFFF
- writer.writeUShort(currentIndex)
- if len(self.MarkedInsertionAction) > 0:
- markedIndex = actionIndex[
- tuple(self.MarkedInsertionAction)]
- else:
- markedIndex = 0xFFFF
- writer.writeUShort(markedIndex)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is not None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.SetMark = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- self.CurrentIsKashidaLike = bool(flags & 0x2000)
- self.MarkedIsKashidaLike = bool(flags & 0x1000)
- self.CurrentInsertBefore = bool(flags & 0x0800)
- self.MarkedInsertBefore = bool(flags & 0x0400)
- self.CurrentInsertionAction = self._decompileInsertionAction(
- actionReader, font,
- index=reader.readUShort(),
- count=((flags & 0x03E0) >> 5))
- self.MarkedInsertionAction = self._decompileInsertionAction(
- actionReader, font,
- index=reader.readUShort(),
- count=(flags & 0x001F))
-
- def _decompileInsertionAction(self, actionReader, font, index, count):
- if index == 0xFFFF or count == 0:
- return []
- reader = actionReader.getSubReader(
- actionReader.pos + index * 2)
- return font.getGlyphNameMany(reader.readUShortArray(count))
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- for g in self.CurrentInsertionAction:
- xmlWriter.simpletag("CurrentInsertionAction", glyph=g)
- xmlWriter.newline()
- for g in self.MarkedInsertionAction:
- xmlWriter.simpletag("MarkedInsertionAction", glyph=g)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- self.__init__()
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
- elif eltName == "CurrentInsertionAction":
- self.CurrentInsertionAction.append(
- eltAttrs["glyph"])
- elif eltName == "MarkedInsertionAction":
- self.MarkedInsertionAction.append(
- eltAttrs["glyph"])
- else:
- assert False, eltName
-
- @staticmethod
- def compileActions(font, states):
- actions, actionIndex, result = set(), {}, b""
- for state in states:
- for _glyphClass, trans in state.Transitions.items():
- if trans.CurrentInsertionAction is not None:
- actions.add(tuple(trans.CurrentInsertionAction))
- if trans.MarkedInsertionAction is not None:
- actions.add(tuple(trans.MarkedInsertionAction))
- # Sort the compiled actions in decreasing order of
- # length, so that the longer sequence come before the
- # shorter ones.
- for action in sorted(actions, key=lambda x:(-len(x), x)):
- # We insert all sub-sequences of the action glyph sequence
- # into actionIndex. For example, if one action triggers on
- # glyph sequence [A, B, C, D, E] and another action triggers
- # on [C, D], we return result=[A, B, C, D, E] (as list of
- # encoded glyph IDs), and actionIndex={('A','B','C','D','E'): 0,
- # ('C','D'): 2}.
- if action in actionIndex:
- continue
- for start in range(0, len(action)):
- startIndex = (len(result) // 2) + start
- for limit in range(start, len(action)):
- glyphs = action[start : limit + 1]
- actionIndex.setdefault(glyphs, startIndex)
- for glyph in action:
- glyphID = font.getGlyphID(glyph)
- result += struct.pack(">H", glyphID)
- return result, actionIndex
+ staticSize = 8
+ actionHeaderSize = 4 # 4 bytes for actionOffset
+ _FLAGS = [
+ "SetMark",
+ "DontAdvance",
+ "CurrentIsKashidaLike",
+ "MarkedIsKashidaLike",
+ "CurrentInsertBefore",
+ "MarkedInsertBefore",
+ ]
+
+ def __init__(self):
+ self.NewState = 0
+ for flag in self._FLAGS:
+ setattr(self, flag, False)
+ self.ReservedFlags = 0
+ self.CurrentInsertionAction, self.MarkedInsertionAction = [], []
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is not None
+ writer.writeUShort(self.NewState)
+ flags = self.ReservedFlags
+ if self.SetMark:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ if self.CurrentIsKashidaLike:
+ flags |= 0x2000
+ if self.MarkedIsKashidaLike:
+ flags |= 0x1000
+ if self.CurrentInsertBefore:
+ flags |= 0x0800
+ if self.MarkedInsertBefore:
+ flags |= 0x0400
+ flags |= len(self.CurrentInsertionAction) << 5
+ flags |= len(self.MarkedInsertionAction)
+ writer.writeUShort(flags)
+ if len(self.CurrentInsertionAction) > 0:
+ currentIndex = actionIndex[tuple(self.CurrentInsertionAction)]
+ else:
+ currentIndex = 0xFFFF
+ writer.writeUShort(currentIndex)
+ if len(self.MarkedInsertionAction) > 0:
+ markedIndex = actionIndex[tuple(self.MarkedInsertionAction)]
+ else:
+ markedIndex = 0xFFFF
+ writer.writeUShort(markedIndex)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is not None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.SetMark = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ self.CurrentIsKashidaLike = bool(flags & 0x2000)
+ self.MarkedIsKashidaLike = bool(flags & 0x1000)
+ self.CurrentInsertBefore = bool(flags & 0x0800)
+ self.MarkedInsertBefore = bool(flags & 0x0400)
+ self.CurrentInsertionAction = self._decompileInsertionAction(
+ actionReader, font, index=reader.readUShort(), count=((flags & 0x03E0) >> 5)
+ )
+ self.MarkedInsertionAction = self._decompileInsertionAction(
+ actionReader, font, index=reader.readUShort(), count=(flags & 0x001F)
+ )
+
+ def _decompileInsertionAction(self, actionReader, font, index, count):
+ if index == 0xFFFF or count == 0:
+ return []
+ reader = actionReader.getSubReader(actionReader.pos + index * 2)
+ return font.getGlyphNameMany(reader.readUShortArray(count))
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ for g in self.CurrentInsertionAction:
+ xmlWriter.simpletag("CurrentInsertionAction", glyph=g)
+ xmlWriter.newline()
+ for g in self.MarkedInsertionAction:
+ xmlWriter.simpletag("MarkedInsertionAction", glyph=g)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ self.__init__()
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
+ elif eltName == "CurrentInsertionAction":
+ self.CurrentInsertionAction.append(eltAttrs["glyph"])
+ elif eltName == "MarkedInsertionAction":
+ self.MarkedInsertionAction.append(eltAttrs["glyph"])
+ else:
+ assert False, eltName
+
+ @staticmethod
+ def compileActions(font, states):
+ actions, actionIndex, result = set(), {}, b""
+ for state in states:
+ for _glyphClass, trans in state.Transitions.items():
+ if trans.CurrentInsertionAction is not None:
+ actions.add(tuple(trans.CurrentInsertionAction))
+ if trans.MarkedInsertionAction is not None:
+ actions.add(tuple(trans.MarkedInsertionAction))
+ # Sort the compiled actions in decreasing order of
+        # length, so that the longer sequences come before the
+ # shorter ones.
+ for action in sorted(actions, key=lambda x: (-len(x), x)):
+ # We insert all sub-sequences of the action glyph sequence
+ # into actionIndex. For example, if one action triggers on
+ # glyph sequence [A, B, C, D, E] and another action triggers
+        # on [C, D], we return result=[A, B, C, D, E] (as a list of
+ # encoded glyph IDs), and actionIndex={('A','B','C','D','E'): 0,
+ # ('C','D'): 2}.
+ if action in actionIndex:
+ continue
+ for start in range(0, len(action)):
+ startIndex = (len(result) // 2) + start
+ for limit in range(start, len(action)):
+ glyphs = action[start : limit + 1]
+ actionIndex.setdefault(glyphs, startIndex)
+ for glyph in action:
+ glyphID = font.getGlyphID(glyph)
+ result += struct.pack(">H", glyphID)
+ return result, actionIndex
class FeatureParams(BaseTable):
+ def compile(self, writer, font):
+ assert (
+ featureParamTypes.get(writer["FeatureTag"]) == self.__class__
+ ), "Wrong FeatureParams type for feature '%s': %s" % (
+ writer["FeatureTag"],
+ self.__class__.__name__,
+ )
+ BaseTable.compile(self, writer, font)
- def compile(self, writer, font):
- assert featureParamTypes.get(writer['FeatureTag']) == self.__class__, "Wrong FeatureParams type for feature '%s': %s" % (writer['FeatureTag'], self.__class__.__name__)
- BaseTable.compile(self, writer, font)
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__)
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__)
class FeatureParamsSize(FeatureParams):
- pass
+ pass
+
class FeatureParamsStylisticSet(FeatureParams):
- pass
+ pass
+
class FeatureParamsCharacterVariants(FeatureParams):
- pass
+ pass
-class Coverage(FormatSwitchingBaseTable):
- # manual implementation to get rid of glyphID dependencies
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'glyphs'):
- self.glyphs = []
-
- def postRead(self, rawTable, font):
- if self.Format == 1:
- self.glyphs = rawTable["GlyphArray"]
- elif self.Format == 2:
- glyphs = self.glyphs = []
- ranges = rawTable["RangeRecord"]
- # Some SIL fonts have coverage entries that don't have sorted
- # StartCoverageIndex. If it is so, fixup and warn. We undo
- # this when writing font out.
- sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex)
- if ranges != sorted_ranges:
- log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
- ranges = sorted_ranges
- del sorted_ranges
- for r in ranges:
- start = r.Start
- end = r.End
- startID = font.getGlyphID(start)
- endID = font.getGlyphID(end) + 1
- glyphs.extend(font.getGlyphNameMany(range(startID, endID)))
- else:
- self.glyphs = []
- log.warning("Unknown Coverage format: %s", self.Format)
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- glyphs = getattr(self, "glyphs", None)
- if glyphs is None:
- glyphs = self.glyphs = []
- format = 1
- rawTable = {"GlyphArray": glyphs}
- if glyphs:
- # find out whether Format 2 is more compact or not
- glyphIDs = font.getGlyphIDMany(glyphs)
- brokenOrder = sorted(glyphIDs) != glyphIDs
-
- last = glyphIDs[0]
- ranges = [[last]]
- for glyphID in glyphIDs[1:]:
- if glyphID != last + 1:
- ranges[-1].append(last)
- ranges.append([glyphID])
- last = glyphID
- ranges[-1].append(last)
-
- if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 1 word
- # Format 2 is more compact
- index = 0
- for i in range(len(ranges)):
- start, end = ranges[i]
- r = RangeRecord()
- r.StartID = start
- r.Start = font.getGlyphName(start)
- r.End = font.getGlyphName(end)
- r.StartCoverageIndex = index
- ranges[i] = r
- index = index + end - start + 1
- if brokenOrder:
- log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
- ranges.sort(key=lambda a: a.StartID)
- for r in ranges:
- del r.StartID
- format = 2
- rawTable = {"RangeRecord": ranges}
- #else:
- # fallthrough; Format 1 is more compact
- self.Format = format
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- for glyphName in getattr(self, "glyphs", []):
- xmlWriter.simpletag("Glyph", value=glyphName)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- glyphs = getattr(self, "glyphs", None)
- if glyphs is None:
- glyphs = []
- self.glyphs = glyphs
- glyphs.append(attrs["value"])
+class Coverage(FormatSwitchingBaseTable):
+ # manual implementation to get rid of glyphID dependencies
+
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "glyphs"):
+ self.glyphs = []
+
+ def postRead(self, rawTable, font):
+ if self.Format == 1:
+ self.glyphs = rawTable["GlyphArray"]
+ elif self.Format == 2:
+ glyphs = self.glyphs = []
+ ranges = rawTable["RangeRecord"]
+            # Some SIL fonts have coverage entries that are not sorted
+            # by StartCoverageIndex. If so, fix them up and warn. We undo
+            # this when writing the font out.
+ sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex)
+ if ranges != sorted_ranges:
+ log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
+ ranges = sorted_ranges
+ del sorted_ranges
+ for r in ranges:
+ start = r.Start
+ end = r.End
+ startID = font.getGlyphID(start)
+ endID = font.getGlyphID(end) + 1
+ glyphs.extend(font.getGlyphNameMany(range(startID, endID)))
+ else:
+ self.glyphs = []
+ log.warning("Unknown Coverage format: %s", self.Format)
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ glyphs = getattr(self, "glyphs", None)
+ if glyphs is None:
+ glyphs = self.glyphs = []
+ format = 1
+ rawTable = {"GlyphArray": glyphs}
+ if glyphs:
+ # find out whether Format 2 is more compact or not
+ glyphIDs = font.getGlyphIDMany(glyphs)
+ brokenOrder = sorted(glyphIDs) != glyphIDs
+
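+            # Collapse consecutive glyph IDs into [start, end] ranges;
+            # Format 2 costs one 3-uint16 RangeRecord per range, while
+            # Format 1 costs one uint16 per glyph.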
+ last = glyphIDs[0]
+ ranges = [[last]]
+ for glyphID in glyphIDs[1:]:
+ if glyphID != last + 1:
+ ranges[-1].append(last)
+ ranges.append([glyphID])
+ last = glyphID
+ ranges[-1].append(last)
+
+ if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 1 word
+ # Format 2 is more compact
+ index = 0
+ for i in range(len(ranges)):
+ start, end = ranges[i]
+ r = RangeRecord()
+ r.StartID = start
+ r.Start = font.getGlyphName(start)
+ r.End = font.getGlyphName(end)
+ r.StartCoverageIndex = index
+ ranges[i] = r
+ index = index + end - start + 1
+ if brokenOrder:
+ log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
+ ranges.sort(key=lambda a: a.StartID)
+ for r in ranges:
+ del r.StartID
+ format = 2
+ rawTable = {"RangeRecord": ranges}
+ # else:
+ # fallthrough; Format 1 is more compact
+ self.Format = format
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ for glyphName in getattr(self, "glyphs", []):
+ xmlWriter.simpletag("Glyph", value=glyphName)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ glyphs = getattr(self, "glyphs", None)
+ if glyphs is None:
+ glyphs = []
+ self.glyphs = glyphs
+ glyphs.append(attrs["value"])
# The special 0xFFFFFFFF delta-set index is used to indicate that there
@@ -606,986 +632,1077 @@ NO_VARIATION_INDEX = 0xFFFFFFFF
class DeltaSetIndexMap(getFormatSwitchingBaseTableClass("uint8")):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = []
-
- def postRead(self, rawTable, font):
- assert (rawTable['EntryFormat'] & 0xFFC0) == 0
- self.mapping = rawTable['mapping']
-
- @staticmethod
- def getEntryFormat(mapping):
- ored = 0
- for idx in mapping:
- ored |= idx
-
- inner = ored & 0xFFFF
- innerBits = 0
- while inner:
- innerBits += 1
- inner >>= 1
- innerBits = max(innerBits, 1)
- assert innerBits <= 16
-
- ored = (ored >> (16-innerBits)) | (ored & ((1<<innerBits)-1))
- if ored <= 0x000000FF:
- entrySize = 1
- elif ored <= 0x0000FFFF:
- entrySize = 2
- elif ored <= 0x00FFFFFF:
- entrySize = 3
- else:
- entrySize = 4
-
- return ((entrySize - 1) << 4) | (innerBits - 1)
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = []
- self.Format = 1 if len(mapping) > 0xFFFF else 0
- rawTable = self.__dict__.copy()
- rawTable['MappingCount'] = len(mapping)
- rawTable['EntryFormat'] = self.getEntryFormat(mapping)
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- # Make xml dump less verbose, by omitting no-op entries like:
- # <Map index="..." outer="65535" inner="65535"/>
- xmlWriter.comment(
- "Omitted values default to 0xFFFF/0xFFFF (no variations)"
- )
- xmlWriter.newline()
- for i, value in enumerate(getattr(self, "mapping", [])):
- attrs = [('index', i)]
- if value != NO_VARIATION_INDEX:
- attrs.extend([
- ('outer', value >> 16),
- ('inner', value & 0xFFFF),
- ])
- xmlWriter.simpletag("Map", attrs)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- self.mapping = mapping = []
- index = safeEval(attrs['index'])
- outer = safeEval(attrs.get('outer', '0xFFFF'))
- inner = safeEval(attrs.get('inner', '0xFFFF'))
- assert inner <= 0xFFFF
- mapping.insert(index, (outer << 16) | inner)
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = []
+
+ def postRead(self, rawTable, font):
+ assert (rawTable["EntryFormat"] & 0xFFC0) == 0
+ self.mapping = rawTable["mapping"]
+
+ @staticmethod
+ def getEntryFormat(mapping):
+ ored = 0
+ for idx in mapping:
+ ored |= idx
+
+ inner = ored & 0xFFFF
+ innerBits = 0
+ while inner:
+ innerBits += 1
+ inner >>= 1
+ innerBits = max(innerBits, 1)
+ assert innerBits <= 16
+
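+        # Fold the outer (high 16) bits down so they sit directly above
+        # the inner bits, then pick the smallest entry size (1-4 bytes)
+        # that can hold the combined value.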
+ ored = (ored >> (16 - innerBits)) | (ored & ((1 << innerBits) - 1))
+ if ored <= 0x000000FF:
+ entrySize = 1
+ elif ored <= 0x0000FFFF:
+ entrySize = 2
+ elif ored <= 0x00FFFFFF:
+ entrySize = 3
+ else:
+ entrySize = 4
+
+ return ((entrySize - 1) << 4) | (innerBits - 1)
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = []
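+        # Format 0 stores a uint16 MappingCount, format 1 a uint32 one,
+        # so format 1 is only needed for more than 0xFFFF entries.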
+ self.Format = 1 if len(mapping) > 0xFFFF else 0
+ rawTable = self.__dict__.copy()
+ rawTable["MappingCount"] = len(mapping)
+ rawTable["EntryFormat"] = self.getEntryFormat(mapping)
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ # Make xml dump less verbose, by omitting no-op entries like:
+ # <Map index="..." outer="65535" inner="65535"/>
+ xmlWriter.comment("Omitted values default to 0xFFFF/0xFFFF (no variations)")
+ xmlWriter.newline()
+ for i, value in enumerate(getattr(self, "mapping", [])):
+ attrs = [("index", i)]
+ if value != NO_VARIATION_INDEX:
+ attrs.extend(
+ [
+ ("outer", value >> 16),
+ ("inner", value & 0xFFFF),
+ ]
+ )
+ xmlWriter.simpletag("Map", attrs)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ self.mapping = mapping = []
+ index = safeEval(attrs["index"])
+ outer = safeEval(attrs.get("outer", "0xFFFF"))
+ inner = safeEval(attrs.get("inner", "0xFFFF"))
+ assert inner <= 0xFFFF
+ mapping.insert(index, (outer << 16) | inner)
class VarIdxMap(BaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = {}
-
- def postRead(self, rawTable, font):
- assert (rawTable['EntryFormat'] & 0xFFC0) == 0
- glyphOrder = font.getGlyphOrder()
- mapList = rawTable['mapping']
- mapList.extend([mapList[-1]] * (len(glyphOrder) - len(mapList)))
- self.mapping = dict(zip(glyphOrder, mapList))
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = {}
-
- glyphOrder = font.getGlyphOrder()
- mapping = [mapping[g] for g in glyphOrder]
- while len(mapping) > 1 and mapping[-2] == mapping[-1]:
- del mapping[-1]
-
- rawTable = {'mapping': mapping}
- rawTable['MappingCount'] = len(mapping)
- rawTable['EntryFormat'] = DeltaSetIndexMap.getEntryFormat(mapping)
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- for glyph, value in sorted(getattr(self, "mapping", {}).items()):
- attrs = (
- ('glyph', glyph),
- ('outer', value >> 16),
- ('inner', value & 0xFFFF),
- )
- xmlWriter.simpletag("Map", attrs)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = {}
- self.mapping = mapping
- try:
- glyph = attrs['glyph']
- except: # https://github.com/fonttools/fonttools/commit/21cbab8ce9ded3356fef3745122da64dcaf314e9#commitcomment-27649836
- glyph = font.getGlyphOrder()[attrs['index']]
- outer = safeEval(attrs['outer'])
- inner = safeEval(attrs['inner'])
- assert inner <= 0xFFFF
- mapping[glyph] = (outer << 16) | inner
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = {}
+
+ def postRead(self, rawTable, font):
+ assert (rawTable["EntryFormat"] & 0xFFC0) == 0
+ glyphOrder = font.getGlyphOrder()
+ mapList = rawTable["mapping"]
+ mapList.extend([mapList[-1]] * (len(glyphOrder) - len(mapList)))
+ self.mapping = dict(zip(glyphOrder, mapList))
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = {}
+
+ glyphOrder = font.getGlyphOrder()
+ mapping = [mapping[g] for g in glyphOrder]
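+        # Trailing duplicates can be dropped: postRead re-extends the
+        # list by repeating its last entry up to the glyph count.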
+ while len(mapping) > 1 and mapping[-2] == mapping[-1]:
+ del mapping[-1]
+
+ rawTable = {"mapping": mapping}
+ rawTable["MappingCount"] = len(mapping)
+ rawTable["EntryFormat"] = DeltaSetIndexMap.getEntryFormat(mapping)
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ for glyph, value in sorted(getattr(self, "mapping", {}).items()):
+ attrs = (
+ ("glyph", glyph),
+ ("outer", value >> 16),
+ ("inner", value & 0xFFFF),
+ )
+ xmlWriter.simpletag("Map", attrs)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = {}
+ self.mapping = mapping
+ try:
+ glyph = attrs["glyph"]
+ except: # https://github.com/fonttools/fonttools/commit/21cbab8ce9ded3356fef3745122da64dcaf314e9#commitcomment-27649836
+ glyph = font.getGlyphOrder()[attrs["index"]]
+ outer = safeEval(attrs["outer"])
+ inner = safeEval(attrs["inner"])
+ assert inner <= 0xFFFF
+ mapping[glyph] = (outer << 16) | inner
class VarRegionList(BaseTable):
-
- def preWrite(self, font):
- # The OT spec says VarStore.VarRegionList.RegionAxisCount should always
- # be equal to the fvar.axisCount, and OTS < v8.0.0 enforces this rule
- # even when the VarRegionList is empty. We can't treat RegionAxisCount
- # like a normal propagated count (== len(Region[i].VarRegionAxis)),
- # otherwise it would default to 0 if VarRegionList is empty.
- # Thus, we force it to always be equal to fvar.axisCount.
- # https://github.com/khaledhosny/ots/pull/192
- fvarTable = font.get("fvar")
- if fvarTable:
- self.RegionAxisCount = len(fvarTable.axes)
- return {
- **self.__dict__,
- "RegionAxisCount": CountReference(self.__dict__, "RegionAxisCount")
- }
+ def preWrite(self, font):
+ # The OT spec says VarStore.VarRegionList.RegionAxisCount should always
+ # be equal to the fvar.axisCount, and OTS < v8.0.0 enforces this rule
+ # even when the VarRegionList is empty. We can't treat RegionAxisCount
+ # like a normal propagated count (== len(Region[i].VarRegionAxis)),
+ # otherwise it would default to 0 if VarRegionList is empty.
+ # Thus, we force it to always be equal to fvar.axisCount.
+ # https://github.com/khaledhosny/ots/pull/192
+ fvarTable = font.get("fvar")
+ if fvarTable:
+ self.RegionAxisCount = len(fvarTable.axes)
+ return {
+ **self.__dict__,
+ "RegionAxisCount": CountReference(self.__dict__, "RegionAxisCount"),
+ }
class SingleSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = {}
-
- def postRead(self, rawTable, font):
- mapping = {}
- input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- if self.Format == 1:
- delta = rawTable["DeltaGlyphID"]
- inputGIDS = font.getGlyphIDMany(input)
- outGIDS = [ (glyphID + delta) % 65536 for glyphID in inputGIDS ]
- outNames = font.getGlyphNameMany(outGIDS)
- for inp, out in zip(input, outNames):
- mapping[inp] = out
- elif self.Format == 2:
- assert len(input) == rawTable["GlyphCount"], \
- "invalid SingleSubstFormat2 table"
- subst = rawTable["Substitute"]
- for inp, sub in zip(input, subst):
- mapping[inp] = sub
- else:
- assert 0, "unknown format: %s" % self.Format
- self.mapping = mapping
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = {}
- items = list(mapping.items())
- getGlyphID = font.getGlyphID
- gidItems = [(getGlyphID(a), getGlyphID(b)) for a,b in items]
- sortableItems = sorted(zip(gidItems, items))
-
- # figure out format
- format = 2
- delta = None
- for inID, outID in gidItems:
- if delta is None:
- delta = (outID - inID) % 65536
-
- if (inID + delta) % 65536 != outID:
- break
- else:
- if delta is None:
- # the mapping is empty, better use format 2
- format = 2
- else:
- format = 1
-
- rawTable = {}
- self.Format = format
- cov = Coverage()
- input = [ item [1][0] for item in sortableItems]
- subst = [ item [1][1] for item in sortableItems]
- cov.glyphs = input
- rawTable["Coverage"] = cov
- if format == 1:
- assert delta is not None
- rawTable["DeltaGlyphID"] = delta
- else:
- rawTable["Substitute"] = subst
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.mapping.items())
- for inGlyph, outGlyph in items:
- xmlWriter.simpletag("Substitution",
- [("in", inGlyph), ("out", outGlyph)])
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = {}
- self.mapping = mapping
- mapping[attrs["in"]] = attrs["out"]
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = {}
+
+ def postRead(self, rawTable, font):
+ mapping = {}
+ input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ if self.Format == 1:
+ delta = rawTable["DeltaGlyphID"]
+ inputGIDS = font.getGlyphIDMany(input)
+ outGIDS = [(glyphID + delta) % 65536 for glyphID in inputGIDS]
+ outNames = font.getGlyphNameMany(outGIDS)
+ for inp, out in zip(input, outNames):
+ mapping[inp] = out
+ elif self.Format == 2:
+ assert (
+ len(input) == rawTable["GlyphCount"]
+ ), "invalid SingleSubstFormat2 table"
+ subst = rawTable["Substitute"]
+ for inp, sub in zip(input, subst):
+ mapping[inp] = sub
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.mapping = mapping
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = {}
+ items = list(mapping.items())
+ getGlyphID = font.getGlyphID
+ gidItems = [(getGlyphID(a), getGlyphID(b)) for a, b in items]
+ sortableItems = sorted(zip(gidItems, items))
+
+ # figure out format
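+        # Format 1 encodes the whole mapping as a single glyph-ID delta
+        # (mod 65536); fall back to Format 2 as soon as one pair does
+        # not fit that delta.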
+ format = 2
+ delta = None
+ for inID, outID in gidItems:
+ if delta is None:
+ delta = (outID - inID) % 65536
+
+ if (inID + delta) % 65536 != outID:
+ break
+ else:
+ if delta is None:
+ # the mapping is empty, better use format 2
+ format = 2
+ else:
+ format = 1
+
+ rawTable = {}
+ self.Format = format
+ cov = Coverage()
+ input = [item[1][0] for item in sortableItems]
+ subst = [item[1][1] for item in sortableItems]
+ cov.glyphs = input
+ rawTable["Coverage"] = cov
+ if format == 1:
+ assert delta is not None
+ rawTable["DeltaGlyphID"] = delta
+ else:
+ rawTable["Substitute"] = subst
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.mapping.items())
+ for inGlyph, outGlyph in items:
+ xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", outGlyph)])
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = {}
+ self.mapping = mapping
+ mapping[attrs["in"]] = attrs["out"]
class MultipleSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = {}
-
- def postRead(self, rawTable, font):
- mapping = {}
- if self.Format == 1:
- glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- subst = [s.Substitute for s in rawTable["Sequence"]]
- mapping = dict(zip(glyphs, subst))
- else:
- assert 0, "unknown format: %s" % self.Format
- self.mapping = mapping
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = {}
- cov = Coverage()
- cov.glyphs = sorted(list(mapping.keys()), key=font.getGlyphID)
- self.Format = 1
- rawTable = {
- "Coverage": cov,
- "Sequence": [self.makeSequence_(mapping[glyph])
- for glyph in cov.glyphs],
- }
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.mapping.items())
- for inGlyph, outGlyphs in items:
- out = ",".join(outGlyphs)
- xmlWriter.simpletag("Substitution",
- [("in", inGlyph), ("out", out)])
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = {}
- self.mapping = mapping
-
- # TTX v3.0 and earlier.
- if name == "Coverage":
- self.old_coverage_ = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- element_name, element_attrs, _ = element
- if element_name == "Glyph":
- self.old_coverage_.append(element_attrs["value"])
- return
- if name == "Sequence":
- index = int(attrs.get("index", len(mapping)))
- glyph = self.old_coverage_[index]
- glyph_mapping = mapping[glyph] = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- element_name, element_attrs, _ = element
- if element_name == "Substitute":
- glyph_mapping.append(element_attrs["value"])
- return
-
- # TTX v3.1 and later.
- outGlyphs = attrs["out"].split(",") if attrs["out"] else []
- mapping[attrs["in"]] = [g.strip() for g in outGlyphs]
-
- @staticmethod
- def makeSequence_(g):
- seq = Sequence()
- seq.Substitute = g
- return seq
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = {}
+
+ def postRead(self, rawTable, font):
+ mapping = {}
+ if self.Format == 1:
+ glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ subst = [s.Substitute for s in rawTable["Sequence"]]
+ mapping = dict(zip(glyphs, subst))
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.mapping = mapping
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = {}
+ cov = Coverage()
+ cov.glyphs = sorted(list(mapping.keys()), key=font.getGlyphID)
+ self.Format = 1
+ rawTable = {
+ "Coverage": cov,
+ "Sequence": [self.makeSequence_(mapping[glyph]) for glyph in cov.glyphs],
+ }
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.mapping.items())
+ for inGlyph, outGlyphs in items:
+ out = ",".join(outGlyphs)
+ xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", out)])
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = {}
+ self.mapping = mapping
+
+ # TTX v3.0 and earlier.
+ if name == "Coverage":
+ self.old_coverage_ = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ element_name, element_attrs, _ = element
+ if element_name == "Glyph":
+ self.old_coverage_.append(element_attrs["value"])
+ return
+ if name == "Sequence":
+ index = int(attrs.get("index", len(mapping)))
+ glyph = self.old_coverage_[index]
+ glyph_mapping = mapping[glyph] = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ element_name, element_attrs, _ = element
+ if element_name == "Substitute":
+ glyph_mapping.append(element_attrs["value"])
+ return
+
+ # TTX v3.1 and later.
+ outGlyphs = attrs["out"].split(",") if attrs["out"] else []
+ mapping[attrs["in"]] = [g.strip() for g in outGlyphs]
+
+ @staticmethod
+ def makeSequence_(g):
+ seq = Sequence()
+ seq.Substitute = g
+ return seq
class ClassDef(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'classDefs'):
- self.classDefs = {}
-
- def postRead(self, rawTable, font):
- classDefs = {}
-
- if self.Format == 1:
- start = rawTable["StartGlyph"]
- classList = rawTable["ClassValueArray"]
- startID = font.getGlyphID(start)
- endID = startID + len(classList)
- glyphNames = font.getGlyphNameMany(range(startID, endID))
- for glyphName, cls in zip(glyphNames, classList):
- if cls:
- classDefs[glyphName] = cls
-
- elif self.Format == 2:
- records = rawTable["ClassRangeRecord"]
- for rec in records:
- cls = rec.Class
- if not cls:
- continue
- start = rec.Start
- end = rec.End
- startID = font.getGlyphID(start)
- endID = font.getGlyphID(end) + 1
- glyphNames = font.getGlyphNameMany(range(startID, endID))
- for glyphName in glyphNames:
- classDefs[glyphName] = cls
- else:
- log.warning("Unknown ClassDef format: %s", self.Format)
- self.classDefs = classDefs
- del self.Format # Don't need this anymore
-
- def _getClassRanges(self, font):
- classDefs = getattr(self, "classDefs", None)
- if classDefs is None:
- self.classDefs = {}
- return
- getGlyphID = font.getGlyphID
- items = []
- for glyphName, cls in classDefs.items():
- if not cls:
- continue
- items.append((getGlyphID(glyphName), glyphName, cls))
- if items:
- items.sort()
- last, lastName, lastCls = items[0]
- ranges = [[lastCls, last, lastName]]
- for glyphID, glyphName, cls in items[1:]:
- if glyphID != last + 1 or cls != lastCls:
- ranges[-1].extend([last, lastName])
- ranges.append([cls, glyphID, glyphName])
- last = glyphID
- lastName = glyphName
- lastCls = cls
- ranges[-1].extend([last, lastName])
- return ranges
-
- def preWrite(self, font):
- format = 2
- rawTable = {"ClassRangeRecord": []}
- ranges = self._getClassRanges(font)
- if ranges:
- startGlyph = ranges[0][1]
- endGlyph = ranges[-1][3]
- glyphCount = endGlyph - startGlyph + 1
- if len(ranges) * 3 < glyphCount + 1:
- # Format 2 is more compact
- for i in range(len(ranges)):
- cls, start, startName, end, endName = ranges[i]
- rec = ClassRangeRecord()
- rec.Start = startName
- rec.End = endName
- rec.Class = cls
- ranges[i] = rec
- format = 2
- rawTable = {"ClassRangeRecord": ranges}
- else:
- # Format 1 is more compact
- startGlyphName = ranges[0][2]
- classes = [0] * glyphCount
- for cls, start, startName, end, endName in ranges:
- for g in range(start - startGlyph, end - startGlyph + 1):
- classes[g] = cls
- format = 1
- rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes}
- self.Format = format
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.classDefs.items())
- for glyphName, cls in items:
- xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)])
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- classDefs = getattr(self, "classDefs", None)
- if classDefs is None:
- classDefs = {}
- self.classDefs = classDefs
- classDefs[attrs["glyph"]] = int(attrs["class"])
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "classDefs"):
+ self.classDefs = {}
+
+ def postRead(self, rawTable, font):
+ classDefs = {}
+
+ if self.Format == 1:
+ start = rawTable["StartGlyph"]
+ classList = rawTable["ClassValueArray"]
+ startID = font.getGlyphID(start)
+ endID = startID + len(classList)
+ glyphNames = font.getGlyphNameMany(range(startID, endID))
+ for glyphName, cls in zip(glyphNames, classList):
+ if cls:
+ classDefs[glyphName] = cls
+
+ elif self.Format == 2:
+ records = rawTable["ClassRangeRecord"]
+ for rec in records:
+ cls = rec.Class
+ if not cls:
+ continue
+ start = rec.Start
+ end = rec.End
+ startID = font.getGlyphID(start)
+ endID = font.getGlyphID(end) + 1
+ glyphNames = font.getGlyphNameMany(range(startID, endID))
+ for glyphName in glyphNames:
+ classDefs[glyphName] = cls
+ else:
+ log.warning("Unknown ClassDef format: %s", self.Format)
+ self.classDefs = classDefs
+ del self.Format # Don't need this anymore
+
+ def _getClassRanges(self, font):
+ classDefs = getattr(self, "classDefs", None)
+ if classDefs is None:
+ self.classDefs = {}
+ return
+ getGlyphID = font.getGlyphID
+ items = []
+ for glyphName, cls in classDefs.items():
+ if not cls:
+ continue
+ items.append((getGlyphID(glyphName), glyphName, cls))
+ if items:
+ items.sort()
+ last, lastName, lastCls = items[0]
+ ranges = [[lastCls, last, lastName]]
+ for glyphID, glyphName, cls in items[1:]:
+ if glyphID != last + 1 or cls != lastCls:
+ ranges[-1].extend([last, lastName])
+ ranges.append([cls, glyphID, glyphName])
+ last = glyphID
+ lastName = glyphName
+ lastCls = cls
+ ranges[-1].extend([last, lastName])
+ return ranges
+
+ def preWrite(self, font):
+ format = 2
+ rawTable = {"ClassRangeRecord": []}
+ ranges = self._getClassRanges(font)
+ if ranges:
+ startGlyph = ranges[0][1]
+ endGlyph = ranges[-1][3]
+ glyphCount = endGlyph - startGlyph + 1
+ if len(ranges) * 3 < glyphCount + 1:
+ # Format 2 is more compact
+ for i in range(len(ranges)):
+ cls, start, startName, end, endName = ranges[i]
+ rec = ClassRangeRecord()
+ rec.Start = startName
+ rec.End = endName
+ rec.Class = cls
+ ranges[i] = rec
+ format = 2
+ rawTable = {"ClassRangeRecord": ranges}
+ else:
+ # Format 1 is more compact
+ startGlyphName = ranges[0][2]
+ classes = [0] * glyphCount
+ for cls, start, startName, end, endName in ranges:
+ for g in range(start - startGlyph, end - startGlyph + 1):
+ classes[g] = cls
+ format = 1
+ rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes}
+ self.Format = format
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.classDefs.items())
+ for glyphName, cls in items:
+ xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)])
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ classDefs = getattr(self, "classDefs", None)
+ if classDefs is None:
+ classDefs = {}
+ self.classDefs = classDefs
+ classDefs[attrs["glyph"]] = int(attrs["class"])
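Editorial note: the `len(ranges) * 3 < glyphCount + 1` test in ClassDef.preWrite above is a size heuristic: format 2 costs three uint16 values per class range, while format 1 costs one uint16 per glyph in the covered span plus a start glyph and count. A small sketch of that trade-off (the range and glyph counts are made up):

def pick_classdef_format(num_ranges, glyph_count):
    # Mirrors `len(ranges) * 3 < glyphCount + 1` from ClassDef.preWrite.
    return 2 if num_ranges * 3 < glyph_count + 1 else 1

# A few wide ranges spanning many glyphs: range records (format 2) win.
assert pick_classdef_format(num_ranges=4, glyph_count=500) == 2
# Many short ranges over a small span: the per-glyph array (format 1) wins.
assert pick_classdef_format(num_ranges=40, glyph_count=60) == 1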
class AlternateSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'alternates'):
- self.alternates = {}
-
- def postRead(self, rawTable, font):
- alternates = {}
- if self.Format == 1:
- input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- alts = rawTable["AlternateSet"]
- assert len(input) == len(alts)
- for inp,alt in zip(input,alts):
- alternates[inp] = alt.Alternate
- else:
- assert 0, "unknown format: %s" % self.Format
- self.alternates = alternates
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- self.Format = 1
- alternates = getattr(self, "alternates", None)
- if alternates is None:
- alternates = self.alternates = {}
- items = list(alternates.items())
- for i in range(len(items)):
- glyphName, set = items[i]
- items[i] = font.getGlyphID(glyphName), glyphName, set
- items.sort()
- cov = Coverage()
- cov.glyphs = [ item[1] for item in items]
- alternates = []
- setList = [ item[-1] for item in items]
- for set in setList:
- alts = AlternateSet()
- alts.Alternate = set
- alternates.append(alts)
- # a special case to deal with the fact that several hundred Adobe Japan1-5
- # CJK fonts will overflow an offset if the coverage table isn't pushed to the end.
- # Also useful in that when splitting a sub-table because of an offset overflow
- # I don't need to calculate the change in the subtable offset due to the change in the coverage table size.
- # Allows packing more rules in subtable.
- self.sortCoverageLast = 1
- return {"Coverage": cov, "AlternateSet": alternates}
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.alternates.items())
- for glyphName, alternates in items:
- xmlWriter.begintag("AlternateSet", glyph=glyphName)
- xmlWriter.newline()
- for alt in alternates:
- xmlWriter.simpletag("Alternate", glyph=alt)
- xmlWriter.newline()
- xmlWriter.endtag("AlternateSet")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- alternates = getattr(self, "alternates", None)
- if alternates is None:
- alternates = {}
- self.alternates = alternates
- glyphName = attrs["glyph"]
- set = []
- alternates[glyphName] = set
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- set.append(attrs["glyph"])
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "alternates"):
+ self.alternates = {}
+
+ def postRead(self, rawTable, font):
+ alternates = {}
+ if self.Format == 1:
+ input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ alts = rawTable["AlternateSet"]
+ assert len(input) == len(alts)
+ for inp, alt in zip(input, alts):
+ alternates[inp] = alt.Alternate
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.alternates = alternates
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ self.Format = 1
+ alternates = getattr(self, "alternates", None)
+ if alternates is None:
+ alternates = self.alternates = {}
+ items = list(alternates.items())
+ for i in range(len(items)):
+ glyphName, set = items[i]
+ items[i] = font.getGlyphID(glyphName), glyphName, set
+ items.sort()
+ cov = Coverage()
+ cov.glyphs = [item[1] for item in items]
+ alternates = []
+ setList = [item[-1] for item in items]
+ for set in setList:
+ alts = AlternateSet()
+ alts.Alternate = set
+ alternates.append(alts)
+ # a special case to deal with the fact that several hundred Adobe Japan1-5
+ # CJK fonts will overflow an offset if the coverage table isn't pushed to the end.
+ # Also useful in that when splitting a sub-table because of an offset overflow
+ # I don't need to calculate the change in the subtable offset due to the change in the coverage table size.
+ # Allows packing more rules in subtable.
+ self.sortCoverageLast = 1
+ return {"Coverage": cov, "AlternateSet": alternates}
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.alternates.items())
+ for glyphName, alternates in items:
+ xmlWriter.begintag("AlternateSet", glyph=glyphName)
+ xmlWriter.newline()
+ for alt in alternates:
+ xmlWriter.simpletag("Alternate", glyph=alt)
+ xmlWriter.newline()
+ xmlWriter.endtag("AlternateSet")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ alternates = getattr(self, "alternates", None)
+ if alternates is None:
+ alternates = {}
+ self.alternates = alternates
+ glyphName = attrs["glyph"]
+ set = []
+ alternates[glyphName] = set
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ set.append(attrs["glyph"])
class LigatureSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'ligatures'):
- self.ligatures = {}
-
- def postRead(self, rawTable, font):
- ligatures = {}
- if self.Format == 1:
- input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- ligSets = rawTable["LigatureSet"]
- assert len(input) == len(ligSets)
- for i in range(len(input)):
- ligatures[input[i]] = ligSets[i].Ligature
- else:
- assert 0, "unknown format: %s" % self.Format
- self.ligatures = ligatures
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- self.Format = 1
- ligatures = getattr(self, "ligatures", None)
- if ligatures is None:
- ligatures = self.ligatures = {}
-
- if ligatures and isinstance(next(iter(ligatures)), tuple):
- # New high-level API in v3.1 and later. Note that we just support compiling this
- # for now. We don't load to this API, and don't do XML with it.
-
- # ligatures is map from components-sequence to lig-glyph
- newLigatures = dict()
- for comps,lig in sorted(ligatures.items(), key=lambda item: (-len(item[0]), item[0])):
- ligature = Ligature()
- ligature.Component = comps[1:]
- ligature.CompCount = len(comps)
- ligature.LigGlyph = lig
- newLigatures.setdefault(comps[0], []).append(ligature)
- ligatures = newLigatures
-
- items = list(ligatures.items())
- for i in range(len(items)):
- glyphName, set = items[i]
- items[i] = font.getGlyphID(glyphName), glyphName, set
- items.sort()
- cov = Coverage()
- cov.glyphs = [ item[1] for item in items]
-
- ligSets = []
- setList = [ item[-1] for item in items ]
- for set in setList:
- ligSet = LigatureSet()
- ligs = ligSet.Ligature = []
- for lig in set:
- ligs.append(lig)
- ligSets.append(ligSet)
- # Useful in that when splitting a sub-table because of an offset overflow
- # I don't need to calculate the change in subtabl offset due to the coverage table size.
- # Allows packing more rules in subtable.
- self.sortCoverageLast = 1
- return {"Coverage": cov, "LigatureSet": ligSets}
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.ligatures.items())
- for glyphName, ligSets in items:
- xmlWriter.begintag("LigatureSet", glyph=glyphName)
- xmlWriter.newline()
- for lig in ligSets:
- xmlWriter.simpletag("Ligature", glyph=lig.LigGlyph,
- components=",".join(lig.Component))
- xmlWriter.newline()
- xmlWriter.endtag("LigatureSet")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- ligatures = getattr(self, "ligatures", None)
- if ligatures is None:
- ligatures = {}
- self.ligatures = ligatures
- glyphName = attrs["glyph"]
- ligs = []
- ligatures[glyphName] = ligs
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- lig = Ligature()
- lig.LigGlyph = attrs["glyph"]
- components = attrs["components"]
- lig.Component = components.split(",") if components else []
- lig.CompCount = len(lig.Component)
- ligs.append(lig)
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "ligatures"):
+ self.ligatures = {}
+
+ def postRead(self, rawTable, font):
+ ligatures = {}
+ if self.Format == 1:
+ input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ ligSets = rawTable["LigatureSet"]
+ assert len(input) == len(ligSets)
+ for i in range(len(input)):
+ ligatures[input[i]] = ligSets[i].Ligature
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.ligatures = ligatures
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ self.Format = 1
+ ligatures = getattr(self, "ligatures", None)
+ if ligatures is None:
+ ligatures = self.ligatures = {}
+
+ if ligatures and isinstance(next(iter(ligatures)), tuple):
+ # New high-level API in v3.1 and later. Note that we only support compiling this
+ # for now; we don't load into this API, and we don't support XML for it.
+
+ # ligatures is map from components-sequence to lig-glyph
+ newLigatures = dict()
+ for comps, lig in sorted(
+ ligatures.items(), key=lambda item: (-len(item[0]), item[0])
+ ):
+ ligature = Ligature()
+ ligature.Component = comps[1:]
+ ligature.CompCount = len(comps)
+ ligature.LigGlyph = lig
+ newLigatures.setdefault(comps[0], []).append(ligature)
+ ligatures = newLigatures
+
+ items = list(ligatures.items())
+ for i in range(len(items)):
+ glyphName, set = items[i]
+ items[i] = font.getGlyphID(glyphName), glyphName, set
+ items.sort()
+ cov = Coverage()
+ cov.glyphs = [item[1] for item in items]
+
+ ligSets = []
+ setList = [item[-1] for item in items]
+ for set in setList:
+ ligSet = LigatureSet()
+ ligs = ligSet.Ligature = []
+ for lig in set:
+ ligs.append(lig)
+ ligSets.append(ligSet)
+ # Useful in that when splitting a sub-table because of an offset overflow
+ # I don't need to calculate the change in the subtable offset due to the change in the coverage table size.
+ # Allows packing more rules in subtable.
+ self.sortCoverageLast = 1
+ return {"Coverage": cov, "LigatureSet": ligSets}
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.ligatures.items())
+ for glyphName, ligSets in items:
+ xmlWriter.begintag("LigatureSet", glyph=glyphName)
+ xmlWriter.newline()
+ for lig in ligSets:
+ xmlWriter.simpletag(
+ "Ligature", glyph=lig.LigGlyph, components=",".join(lig.Component)
+ )
+ xmlWriter.newline()
+ xmlWriter.endtag("LigatureSet")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ ligatures = getattr(self, "ligatures", None)
+ if ligatures is None:
+ ligatures = {}
+ self.ligatures = ligatures
+ glyphName = attrs["glyph"]
+ ligs = []
+ ligatures[glyphName] = ligs
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ lig = Ligature()
+ lig.LigGlyph = attrs["glyph"]
+ components = attrs["components"]
+ lig.Component = components.split(",") if components else []
+ lig.CompCount = len(lig.Component)
+ ligs.append(lig)
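Editorial note on the tuple-keyed API handled in preWrite above: a components-sequence → ligature-glyph map is regrouped by first component, longest sequences first, so that e.g. f+f+i is matched before f+i. A plain-Python sketch of that regrouping with hypothetical glyph names:

# ligatures maps a full component sequence to the resulting ligature glyph.
ligatures = {("f", "i"): "fi", ("f", "f", "i"): "ffi", ("f", "l"): "fl"}
buckets = {}
for comps, lig in sorted(ligatures.items(), key=lambda item: (-len(item[0]), item[0])):
    # Keyed by the first component, storing the remaining components and the
    # ligature glyph, much like the Ligature records built in preWrite.
    buckets.setdefault(comps[0], []).append((comps[1:], lig))
assert buckets == {"f": [(("f", "i"), "ffi"), (("i",), "fi"), (("l",), "fl")]}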
class COLR(BaseTable):
+ def decompile(self, reader, font):
+ # COLRv0 is exceptional in that LayerRecordCount appears *after* the
+ # LayerRecordArray it counts, but the parser logic expects Count fields
+ # to always precede the arrays. Here we work around this by parsing the
+ # LayerRecordCount before the rest of the table, and storing it in
+ # the reader's local state.
+ subReader = reader.getSubReader(offset=0)
+ for conv in self.getConverters():
+ if conv.name != "LayerRecordCount":
+ subReader.advance(conv.staticSize)
+ continue
+ reader[conv.name] = conv.read(subReader, font, tableDict={})
+ break
+ else:
+ raise AssertionError("LayerRecordCount converter not found")
+ return BaseTable.decompile(self, reader, font)
+
+ def preWrite(self, font):
+ # The writer similarly assumes Count values precede the things counted,
+ # thus here we pre-initialize a CountReference; the actual count value
+ # will be set to the length of the array by the time this is assembled.
+ self.LayerRecordCount = None
+ return {
+ **self.__dict__,
+ "LayerRecordCount": CountReference(self.__dict__, "LayerRecordCount"),
+ }
+
+ def computeClipBoxes(self, glyphSet: "_TTGlyphSet", quantization: int = 1):
+ if self.Version == 0:
+ return
+
+ clips = {}
+ for rec in self.BaseGlyphList.BaseGlyphPaintRecord:
+ try:
+ clipBox = rec.Paint.computeClipBox(self, glyphSet, quantization)
+ except Exception as e:
+ from fontTools.ttLib import TTLibError
- def decompile(self, reader, font):
- # COLRv0 is exceptional in that LayerRecordCount appears *after* the
- # LayerRecordArray it counts, but the parser logic expects Count fields
- # to always precede the arrays. Here we work around this by parsing the
- # LayerRecordCount before the rest of the table, and storing it in
- # the reader's local state.
- subReader = reader.getSubReader(offset=0)
- for conv in self.getConverters():
- if conv.name != "LayerRecordCount":
- subReader.advance(conv.staticSize)
- continue
- reader[conv.name] = conv.read(subReader, font, tableDict={})
- break
- else:
- raise AssertionError("LayerRecordCount converter not found")
- return BaseTable.decompile(self, reader, font)
-
- def preWrite(self, font):
- # The writer similarly assumes Count values precede the things counted,
- # thus here we pre-initialize a CountReference; the actual count value
- # will be set to the lenght of the array by the time this is assembled.
- self.LayerRecordCount = None
- return {
- **self.__dict__,
- "LayerRecordCount": CountReference(self.__dict__, "LayerRecordCount")
- }
+ raise TTLibError(
+ f"Failed to compute COLR ClipBox for {rec.BaseGlyph!r}"
+ ) from e
+
+ if clipBox is not None:
+ clips[rec.BaseGlyph] = clipBox
+
+ hasClipList = hasattr(self, "ClipList") and self.ClipList is not None
+ if not clips:
+ if hasClipList:
+ self.ClipList = None
+ else:
+ if not hasClipList:
+ self.ClipList = ClipList()
+ self.ClipList.Format = 1
+ self.ClipList.clips = clips
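A hedged usage sketch of the new computeClipBoxes method (assuming a fontTools build that includes this change): open a COLRv1 font, compute quantized clip boxes from the glyph outlines, and save. The file names are hypothetical; quantization=10 rounds box edges outward to multiples of 10 font units.

from fontTools.ttLib import TTFont

font = TTFont("MyColorFont.ttf")  # hypothetical COLRv1 font
colr = font["COLR"].table
colr.computeClipBoxes(font.getGlyphSet(), quantization=10)
font.save("MyColorFont-clipped.ttf")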
class LookupList(BaseTable):
- @property
- def table(self):
- for l in self.Lookup:
- for st in l.SubTable:
- if type(st).__name__.endswith("Subst"):
- return "GSUB"
- if type(st).__name__.endswith("Pos"):
- return "GPOS"
- raise ValueError
-
- def toXML2(self, xmlWriter, font):
- if not font or "Debg" not in font or LOOKUP_DEBUG_INFO_KEY not in font["Debg"].data:
- return super().toXML2(xmlWriter, font)
- debugData = font["Debg"].data[LOOKUP_DEBUG_INFO_KEY][self.table]
- for conv in self.getConverters():
- if conv.repeat:
- value = getattr(self, conv.name, [])
- for lookupIndex, item in enumerate(value):
- if str(lookupIndex) in debugData:
- info = LookupDebugInfo(*debugData[str(lookupIndex)])
- tag = info.location
- if info.name:
- tag = f'{info.name}: {tag}'
- if info.feature:
- script,language,feature = info.feature
- tag = f'{tag} in {feature} ({script}/{language})'
- xmlWriter.comment(tag)
- xmlWriter.newline()
-
- conv.xmlWrite(xmlWriter, font, item, conv.name,
- [("index", lookupIndex)])
- else:
- if conv.aux and not eval(conv.aux, None, vars(self)):
- continue
- value = getattr(self, conv.name, None) # TODO Handle defaults instead of defaulting to None!
- conv.xmlWrite(xmlWriter, font, value, conv.name, [])
+ @property
+ def table(self):
+ for l in self.Lookup:
+ for st in l.SubTable:
+ if type(st).__name__.endswith("Subst"):
+ return "GSUB"
+ if type(st).__name__.endswith("Pos"):
+ return "GPOS"
+ raise ValueError
+
+ def toXML2(self, xmlWriter, font):
+ if (
+ not font
+ or "Debg" not in font
+ or LOOKUP_DEBUG_INFO_KEY not in font["Debg"].data
+ ):
+ return super().toXML2(xmlWriter, font)
+ debugData = font["Debg"].data[LOOKUP_DEBUG_INFO_KEY][self.table]
+ for conv in self.getConverters():
+ if conv.repeat:
+ value = getattr(self, conv.name, [])
+ for lookupIndex, item in enumerate(value):
+ if str(lookupIndex) in debugData:
+ info = LookupDebugInfo(*debugData[str(lookupIndex)])
+ tag = info.location
+ if info.name:
+ tag = f"{info.name}: {tag}"
+ if info.feature:
+ script, language, feature = info.feature
+ tag = f"{tag} in {feature} ({script}/{language})"
+ xmlWriter.comment(tag)
+ xmlWriter.newline()
+
+ conv.xmlWrite(
+ xmlWriter, font, item, conv.name, [("index", lookupIndex)]
+ )
+ else:
+ if conv.aux and not eval(conv.aux, None, vars(self)):
+ continue
+ value = getattr(
+ self, conv.name, None
+ ) # TODO Handle defaults instead of defaulting to None!
+ conv.xmlWrite(xmlWriter, font, value, conv.name, [])
-class BaseGlyphRecordArray(BaseTable):
- def preWrite(self, font):
- self.BaseGlyphRecord = sorted(
- self.BaseGlyphRecord,
- key=lambda rec: font.getGlyphID(rec.BaseGlyph)
- )
- return self.__dict__.copy()
+class BaseGlyphRecordArray(BaseTable):
+ def preWrite(self, font):
+ self.BaseGlyphRecord = sorted(
+ self.BaseGlyphRecord, key=lambda rec: font.getGlyphID(rec.BaseGlyph)
+ )
+ return self.__dict__.copy()
class BaseGlyphList(BaseTable):
-
- def preWrite(self, font):
- self.BaseGlyphPaintRecord = sorted(
- self.BaseGlyphPaintRecord,
- key=lambda rec: font.getGlyphID(rec.BaseGlyph)
- )
- return self.__dict__.copy()
+ def preWrite(self, font):
+ self.BaseGlyphPaintRecord = sorted(
+ self.BaseGlyphPaintRecord, key=lambda rec: font.getGlyphID(rec.BaseGlyph)
+ )
+ return self.__dict__.copy()
class ClipBoxFormat(IntEnum):
- Static = 1
- Variable = 2
+ Static = 1
+ Variable = 2
- def is_variable(self):
- return self is self.Variable
+ def is_variable(self):
+ return self is self.Variable
- def as_variable(self):
- return self.Variable
+ def as_variable(self):
+ return self.Variable
class ClipBox(getFormatSwitchingBaseTableClass("uint8")):
- formatEnum = ClipBoxFormat
+ formatEnum = ClipBoxFormat
- def as_tuple(self):
- return tuple(getattr(self, conv.name) for conv in self.getConverters())
+ def as_tuple(self):
+ return tuple(getattr(self, conv.name) for conv in self.getConverters())
- def __repr__(self):
- return f"{self.__class__.__name__}{self.as_tuple()}"
+ def __repr__(self):
+ return f"{self.__class__.__name__}{self.as_tuple()}"
class ClipList(getFormatSwitchingBaseTableClass("uint8")):
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "clips"):
+ self.clips = {}
+
+ def postRead(self, rawTable, font):
+ clips = {}
+ glyphOrder = font.getGlyphOrder()
+ for i, rec in enumerate(rawTable["ClipRecord"]):
+ if rec.StartGlyphID > rec.EndGlyphID:
+ log.warning(
+ "invalid ClipRecord[%i].StartGlyphID (%i) > "
+ "EndGlyphID (%i); skipped",
+ i,
+ rec.StartGlyphID,
+ rec.EndGlyphID,
+ )
+ continue
+ redefinedGlyphs = []
+ missingGlyphs = []
+ for glyphID in range(rec.StartGlyphID, rec.EndGlyphID + 1):
+ try:
+ glyph = glyphOrder[glyphID]
+ except IndexError:
+ missingGlyphs.append(glyphID)
+ continue
+ if glyph not in clips:
+ clips[glyph] = copy.copy(rec.ClipBox)
+ else:
+ redefinedGlyphs.append(glyphID)
+ if redefinedGlyphs:
+ log.warning(
+ "ClipRecord[%i] overlaps previous records; "
+ "ignoring redefined clip boxes for the "
+ "following glyph ID range: [%i-%i]",
+ i,
+ min(redefinedGlyphs),
+ max(redefinedGlyphs),
+ )
+ if missingGlyphs:
+ log.warning(
+ "ClipRecord[%i] range references missing " "glyph IDs: [%i-%i]",
+ i,
+ min(missingGlyphs),
+ max(missingGlyphs),
+ )
+ self.clips = clips
+
+ def groups(self):
+ glyphsByClip = defaultdict(list)
+ uniqueClips = {}
+ for glyphName, clipBox in self.clips.items():
+ key = clipBox.as_tuple()
+ glyphsByClip[key].append(glyphName)
+ if key not in uniqueClips:
+ uniqueClips[key] = clipBox
+ return {
+ frozenset(glyphs): uniqueClips[key] for key, glyphs in glyphsByClip.items()
+ }
- def populateDefaults(self, propagator=None):
- if not hasattr(self, "clips"):
- self.clips = {}
-
- def postRead(self, rawTable, font):
- clips = {}
- glyphOrder = font.getGlyphOrder()
- for i, rec in enumerate(rawTable["ClipRecord"]):
- if rec.StartGlyphID > rec.EndGlyphID:
- log.warning(
- "invalid ClipRecord[%i].StartGlyphID (%i) > "
- "EndGlyphID (%i); skipped",
- i,
- rec.StartGlyphID,
- rec.EndGlyphID,
- )
- continue
- redefinedGlyphs = []
- missingGlyphs = []
- for glyphID in range(rec.StartGlyphID, rec.EndGlyphID + 1):
- try:
- glyph = glyphOrder[glyphID]
- except IndexError:
- missingGlyphs.append(glyphID)
- continue
- if glyph not in clips:
- clips[glyph] = copy.copy(rec.ClipBox)
- else:
- redefinedGlyphs.append(glyphID)
- if redefinedGlyphs:
- log.warning(
- "ClipRecord[%i] overlaps previous records; "
- "ignoring redefined clip boxes for the "
- "following glyph ID range: [%i-%i]",
- i,
- min(redefinedGlyphs),
- max(redefinedGlyphs),
- )
- if missingGlyphs:
- log.warning(
- "ClipRecord[%i] range references missing "
- "glyph IDs: [%i-%i]",
- i,
- min(missingGlyphs),
- max(missingGlyphs),
- )
- self.clips = clips
-
- def groups(self):
- glyphsByClip = defaultdict(list)
- uniqueClips = {}
- for glyphName, clipBox in self.clips.items():
- key = clipBox.as_tuple()
- glyphsByClip[key].append(glyphName)
- if key not in uniqueClips:
- uniqueClips[key] = clipBox
- return {
- frozenset(glyphs): uniqueClips[key]
- for key, glyphs in glyphsByClip.items()
- }
-
- def preWrite(self, font):
- if not hasattr(self, "clips"):
- self.clips = {}
- clipBoxRanges = {}
- glyphMap = font.getReverseGlyphMap()
- for glyphs, clipBox in self.groups().items():
- glyphIDs = sorted(
- glyphMap[glyphName] for glyphName in glyphs
- if glyphName in glyphMap
- )
- if not glyphIDs:
- continue
- last = glyphIDs[0]
- ranges = [[last]]
- for glyphID in glyphIDs[1:]:
- if glyphID != last + 1:
- ranges[-1].append(last)
- ranges.append([glyphID])
- last = glyphID
- ranges[-1].append(last)
- for start, end in ranges:
- assert (start, end) not in clipBoxRanges
- clipBoxRanges[(start, end)] = clipBox
-
- clipRecords = []
- for (start, end), clipBox in sorted(clipBoxRanges.items()):
- record = ClipRecord()
- record.StartGlyphID = start
- record.EndGlyphID = end
- record.ClipBox = clipBox
- clipRecords.append(record)
- rawTable = {
- "ClipCount": len(clipRecords),
- "ClipRecord": clipRecords,
- }
- return rawTable
-
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- tableName = name if name else self.__class__.__name__
- if attrs is None:
- attrs = []
- if hasattr(self, "Format"):
- attrs.append(("Format", self.Format))
- xmlWriter.begintag(tableName, attrs)
- xmlWriter.newline()
- # sort clips alphabetically to ensure deterministic XML dump
- for glyphs, clipBox in sorted(
- self.groups().items(), key=lambda item: min(item[0])
- ):
- xmlWriter.begintag("Clip")
- xmlWriter.newline()
- for glyphName in sorted(glyphs):
- xmlWriter.simpletag("Glyph", value=glyphName)
- xmlWriter.newline()
- xmlWriter.begintag("ClipBox", [("Format", clipBox.Format)])
- xmlWriter.newline()
- clipBox.toXML2(xmlWriter, font)
- xmlWriter.endtag("ClipBox")
- xmlWriter.newline()
- xmlWriter.endtag("Clip")
- xmlWriter.newline()
- xmlWriter.endtag(tableName)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- clips = getattr(self, "clips", None)
- if clips is None:
- self.clips = clips = {}
- assert name == "Clip"
- glyphs = []
- clipBox = None
- for elem in content:
- if not isinstance(elem, tuple):
- continue
- name, attrs, content = elem
- if name == "Glyph":
- glyphs.append(attrs["value"])
- elif name == "ClipBox":
- clipBox = ClipBox()
- clipBox.Format = safeEval(attrs["Format"])
- for elem in content:
- if not isinstance(elem, tuple):
- continue
- name, attrs, content = elem
- clipBox.fromXML(name, attrs, content, font)
- if clipBox:
- for glyphName in glyphs:
- clips[glyphName] = clipBox
+ def preWrite(self, font):
+ if not hasattr(self, "clips"):
+ self.clips = {}
+ clipBoxRanges = {}
+ glyphMap = font.getReverseGlyphMap()
+ for glyphs, clipBox in self.groups().items():
+ glyphIDs = sorted(
+ glyphMap[glyphName] for glyphName in glyphs if glyphName in glyphMap
+ )
+ if not glyphIDs:
+ continue
+ last = glyphIDs[0]
+ ranges = [[last]]
+ for glyphID in glyphIDs[1:]:
+ if glyphID != last + 1:
+ ranges[-1].append(last)
+ ranges.append([glyphID])
+ last = glyphID
+ ranges[-1].append(last)
+ for start, end in ranges:
+ assert (start, end) not in clipBoxRanges
+ clipBoxRanges[(start, end)] = clipBox
+
+ clipRecords = []
+ for (start, end), clipBox in sorted(clipBoxRanges.items()):
+ record = ClipRecord()
+ record.StartGlyphID = start
+ record.EndGlyphID = end
+ record.ClipBox = clipBox
+ clipRecords.append(record)
+ rawTable = {
+ "ClipCount": len(clipRecords),
+ "ClipRecord": clipRecords,
+ }
+ return rawTable
+
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ tableName = name if name else self.__class__.__name__
+ if attrs is None:
+ attrs = []
+ if hasattr(self, "Format"):
+ attrs.append(("Format", self.Format))
+ xmlWriter.begintag(tableName, attrs)
+ xmlWriter.newline()
+ # sort clips alphabetically to ensure deterministic XML dump
+ for glyphs, clipBox in sorted(
+ self.groups().items(), key=lambda item: min(item[0])
+ ):
+ xmlWriter.begintag("Clip")
+ xmlWriter.newline()
+ for glyphName in sorted(glyphs):
+ xmlWriter.simpletag("Glyph", value=glyphName)
+ xmlWriter.newline()
+ xmlWriter.begintag("ClipBox", [("Format", clipBox.Format)])
+ xmlWriter.newline()
+ clipBox.toXML2(xmlWriter, font)
+ xmlWriter.endtag("ClipBox")
+ xmlWriter.newline()
+ xmlWriter.endtag("Clip")
+ xmlWriter.newline()
+ xmlWriter.endtag(tableName)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ clips = getattr(self, "clips", None)
+ if clips is None:
+ self.clips = clips = {}
+ assert name == "Clip"
+ glyphs = []
+ clipBox = None
+ for elem in content:
+ if not isinstance(elem, tuple):
+ continue
+ name, attrs, content = elem
+ if name == "Glyph":
+ glyphs.append(attrs["value"])
+ elif name == "ClipBox":
+ clipBox = ClipBox()
+ clipBox.Format = safeEval(attrs["Format"])
+ for elem in content:
+ if not isinstance(elem, tuple):
+ continue
+ name, attrs, content = elem
+ clipBox.fromXML(name, attrs, content, font)
+ if clipBox:
+ for glyphName in glyphs:
+ clips[glyphName] = clipBox
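Editorial note: ClipList.preWrite above folds the glyph IDs that share a clip box into contiguous ClipRecord ranges. The run-building loop in isolation, with hypothetical glyph IDs:

# Sorted glyph IDs sharing one clip box are folded into [start, end] runs,
# one ClipRecord per run.
glyphIDs = [3, 4, 5, 9, 10, 12]
last = glyphIDs[0]
ranges = [[last]]
for glyphID in glyphIDs[1:]:
    if glyphID != last + 1:
        ranges[-1].append(last)   # close the current run
        ranges.append([glyphID])  # start a new one
    last = glyphID
ranges[-1].append(last)
assert ranges == [[3, 5], [9, 10], [12, 12]]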
class ExtendMode(IntEnum):
- PAD = 0
- REPEAT = 1
- REFLECT = 2
+ PAD = 0
+ REPEAT = 1
+ REFLECT = 2
# Porter-Duff modes for COLRv1 PaintComposite:
# https://github.com/googlefonts/colr-gradients-spec/tree/off_sub_1#compositemode-enumeration
class CompositeMode(IntEnum):
- CLEAR = 0
- SRC = 1
- DEST = 2
- SRC_OVER = 3
- DEST_OVER = 4
- SRC_IN = 5
- DEST_IN = 6
- SRC_OUT = 7
- DEST_OUT = 8
- SRC_ATOP = 9
- DEST_ATOP = 10
- XOR = 11
- PLUS = 12
- SCREEN = 13
- OVERLAY = 14
- DARKEN = 15
- LIGHTEN = 16
- COLOR_DODGE = 17
- COLOR_BURN = 18
- HARD_LIGHT = 19
- SOFT_LIGHT = 20
- DIFFERENCE = 21
- EXCLUSION = 22
- MULTIPLY = 23
- HSL_HUE = 24
- HSL_SATURATION = 25
- HSL_COLOR = 26
- HSL_LUMINOSITY = 27
+ CLEAR = 0
+ SRC = 1
+ DEST = 2
+ SRC_OVER = 3
+ DEST_OVER = 4
+ SRC_IN = 5
+ DEST_IN = 6
+ SRC_OUT = 7
+ DEST_OUT = 8
+ SRC_ATOP = 9
+ DEST_ATOP = 10
+ XOR = 11
+ PLUS = 12
+ SCREEN = 13
+ OVERLAY = 14
+ DARKEN = 15
+ LIGHTEN = 16
+ COLOR_DODGE = 17
+ COLOR_BURN = 18
+ HARD_LIGHT = 19
+ SOFT_LIGHT = 20
+ DIFFERENCE = 21
+ EXCLUSION = 22
+ MULTIPLY = 23
+ HSL_HUE = 24
+ HSL_SATURATION = 25
+ HSL_COLOR = 26
+ HSL_LUMINOSITY = 27
class PaintFormat(IntEnum):
- PaintColrLayers = 1
- PaintSolid = 2
- PaintVarSolid = 3,
- PaintLinearGradient = 4
- PaintVarLinearGradient = 5
- PaintRadialGradient = 6
- PaintVarRadialGradient = 7
- PaintSweepGradient = 8
- PaintVarSweepGradient = 9
- PaintGlyph = 10
- PaintColrGlyph = 11
- PaintTransform = 12
- PaintVarTransform = 13
- PaintTranslate = 14
- PaintVarTranslate = 15
- PaintScale = 16
- PaintVarScale = 17
- PaintScaleAroundCenter = 18
- PaintVarScaleAroundCenter = 19
- PaintScaleUniform = 20
- PaintVarScaleUniform = 21
- PaintScaleUniformAroundCenter = 22
- PaintVarScaleUniformAroundCenter = 23
- PaintRotate = 24
- PaintVarRotate = 25
- PaintRotateAroundCenter = 26
- PaintVarRotateAroundCenter = 27
- PaintSkew = 28
- PaintVarSkew = 29
- PaintSkewAroundCenter = 30
- PaintVarSkewAroundCenter = 31
- PaintComposite = 32
-
- def is_variable(self):
- return self.name.startswith("PaintVar")
-
- def as_variable(self):
- if self.is_variable():
- return self
- try:
- return PaintFormat.__members__[f"PaintVar{self.name[5:]}"]
- except KeyError:
- return None
+ PaintColrLayers = 1
+ PaintSolid = 2
+ PaintVarSolid = 3
+ PaintLinearGradient = 4
+ PaintVarLinearGradient = 5
+ PaintRadialGradient = 6
+ PaintVarRadialGradient = 7
+ PaintSweepGradient = 8
+ PaintVarSweepGradient = 9
+ PaintGlyph = 10
+ PaintColrGlyph = 11
+ PaintTransform = 12
+ PaintVarTransform = 13
+ PaintTranslate = 14
+ PaintVarTranslate = 15
+ PaintScale = 16
+ PaintVarScale = 17
+ PaintScaleAroundCenter = 18
+ PaintVarScaleAroundCenter = 19
+ PaintScaleUniform = 20
+ PaintVarScaleUniform = 21
+ PaintScaleUniformAroundCenter = 22
+ PaintVarScaleUniformAroundCenter = 23
+ PaintRotate = 24
+ PaintVarRotate = 25
+ PaintRotateAroundCenter = 26
+ PaintVarRotateAroundCenter = 27
+ PaintSkew = 28
+ PaintVarSkew = 29
+ PaintSkewAroundCenter = 30
+ PaintVarSkewAroundCenter = 31
+ PaintComposite = 32
+
+ def is_variable(self):
+ return self.name.startswith("PaintVar")
+
+ def as_variable(self):
+ if self.is_variable():
+ return self
+ try:
+ return PaintFormat.__members__[f"PaintVar{self.name[5:]}"]
+ except KeyError:
+ return None
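For illustration (assuming a fontTools build that includes these helpers): every non-variable paint format maps to its PaintVar* counterpart by name, and formats without a variable twin, such as PaintColrLayers, map to None.

from fontTools.ttLib.tables.otTables import PaintFormat

assert PaintFormat.PaintSolid.as_variable() is PaintFormat.PaintVarSolid
assert PaintFormat.PaintVarSolid.is_variable()
assert PaintFormat.PaintColrLayers.as_variable() is None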
class Paint(getFormatSwitchingBaseTableClass("uint8")):
- formatEnum = PaintFormat
-
- def getFormatName(self):
- try:
- return self.formatEnum(self.Format).name
- except ValueError:
- raise NotImplementedError(f"Unknown Paint format: {self.Format}")
-
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- tableName = name if name else self.__class__.__name__
- if attrs is None:
- attrs = []
- attrs.append(("Format", self.Format))
- xmlWriter.begintag(tableName, attrs)
- xmlWriter.comment(self.getFormatName())
- xmlWriter.newline()
- self.toXML2(xmlWriter, font)
- xmlWriter.endtag(tableName)
- xmlWriter.newline()
-
- def getChildren(self, colr):
- if self.Format == PaintFormat.PaintColrLayers:
- # https://github.com/fonttools/fonttools/issues/2438: don't die when no LayerList exists
- layers = []
- if colr.LayerList is not None:
- layers = colr.LayerList.Paint
- return layers[
- self.FirstLayerIndex : self.FirstLayerIndex + self.NumLayers
- ]
-
- if self.Format == PaintFormat.PaintColrGlyph:
- for record in colr.BaseGlyphList.BaseGlyphPaintRecord:
- if record.BaseGlyph == self.Glyph:
- return [record.Paint]
- else:
- raise KeyError(f"{self.Glyph!r} not in colr.BaseGlyphList")
-
- children = []
- for conv in self.getConverters():
- if conv.tableClass is not None and issubclass(conv.tableClass, type(self)):
- children.append(getattr(self, conv.name))
-
- return children
-
- def traverse(self, colr: COLR, callback):
- """Depth-first traversal of graph rooted at self, callback on each node."""
- if not callable(callback):
- raise TypeError("callback must be callable")
- stack = [self]
- visited = set()
- while stack:
- current = stack.pop()
- if id(current) in visited:
- continue
- callback(current)
- visited.add(id(current))
- stack.extend(reversed(current.getChildren(colr)))
+ formatEnum = PaintFormat
+
+ def getFormatName(self):
+ try:
+ return self.formatEnum(self.Format).name
+ except ValueError:
+ raise NotImplementedError(f"Unknown Paint format: {self.Format}")
+
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ tableName = name if name else self.__class__.__name__
+ if attrs is None:
+ attrs = []
+ attrs.append(("Format", self.Format))
+ xmlWriter.begintag(tableName, attrs)
+ xmlWriter.comment(self.getFormatName())
+ xmlWriter.newline()
+ self.toXML2(xmlWriter, font)
+ xmlWriter.endtag(tableName)
+ xmlWriter.newline()
+
+ def iterPaintSubTables(self, colr: COLR) -> Iterator[BaseTable.SubTableEntry]:
+ if self.Format == PaintFormat.PaintColrLayers:
+ # https://github.com/fonttools/fonttools/issues/2438: don't die when no LayerList exists
+ layers = []
+ if colr.LayerList is not None:
+ layers = colr.LayerList.Paint
+ yield from (
+ BaseTable.SubTableEntry(name="Layers", value=v, index=i)
+ for i, v in enumerate(
+ layers[self.FirstLayerIndex : self.FirstLayerIndex + self.NumLayers]
+ )
+ )
+ return
+
+ if self.Format == PaintFormat.PaintColrGlyph:
+ for record in colr.BaseGlyphList.BaseGlyphPaintRecord:
+ if record.BaseGlyph == self.Glyph:
+ yield BaseTable.SubTableEntry(name="BaseGlyph", value=record.Paint)
+ return
+ else:
+ raise KeyError(f"{self.Glyph!r} not in colr.BaseGlyphList")
+
+ for conv in self.getConverters():
+ if conv.tableClass is not None and issubclass(conv.tableClass, type(self)):
+ value = getattr(self, conv.name)
+ yield BaseTable.SubTableEntry(name=conv.name, value=value)
+
+ def getChildren(self, colr) -> List["Paint"]:
+ # this is kept for backward compatibility (e.g. it's used by the subsetter)
+ return [p.value for p in self.iterPaintSubTables(colr)]
+
+ def traverse(self, colr: COLR, callback):
+ """Depth-first traversal of graph rooted at self, callback on each node."""
+ if not callable(callback):
+ raise TypeError("callback must be callable")
+
+ for path in dfs_base_table(
+ self, iter_subtables_fn=lambda paint: paint.iterPaintSubTables(colr)
+ ):
+ paint = path[-1].value
+ callback(paint)
+
+ def getTransform(self) -> Transform:
+ if self.Format == PaintFormat.PaintTransform:
+ t = self.Transform
+ return Transform(t.xx, t.yx, t.xy, t.yy, t.dx, t.dy)
+ elif self.Format == PaintFormat.PaintTranslate:
+ return Identity.translate(self.dx, self.dy)
+ elif self.Format == PaintFormat.PaintScale:
+ return Identity.scale(self.scaleX, self.scaleY)
+ elif self.Format == PaintFormat.PaintScaleAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .scale(self.scaleX, self.scaleY)
+ .translate(-self.centerX, -self.centerY)
+ )
+ elif self.Format == PaintFormat.PaintScaleUniform:
+ return Identity.scale(self.scale)
+ elif self.Format == PaintFormat.PaintScaleUniformAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .scale(self.scale)
+ .translate(-self.centerX, -self.centerY)
+ )
+ elif self.Format == PaintFormat.PaintRotate:
+ return Identity.rotate(radians(self.angle))
+ elif self.Format == PaintFormat.PaintRotateAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .rotate(radians(self.angle))
+ .translate(-self.centerX, -self.centerY)
+ )
+ elif self.Format == PaintFormat.PaintSkew:
+ return Identity.skew(radians(-self.xSkewAngle), radians(self.ySkewAngle))
+ elif self.Format == PaintFormat.PaintSkewAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .skew(radians(-self.xSkewAngle), radians(self.ySkewAngle))
+ .translate(-self.centerX, -self.centerY)
+ )
+ if PaintFormat(self.Format).is_variable():
+ raise NotImplementedError(f"Variable Paints not supported: {self.Format}")
+
+ return Identity
+
+ def computeClipBox(
+ self, colr: COLR, glyphSet: "_TTGlyphSet", quantization: int = 1
+ ) -> Optional[ClipBox]:
+ pen = ControlBoundsPen(glyphSet)
+ for path in dfs_base_table(
+ self, iter_subtables_fn=lambda paint: paint.iterPaintSubTables(colr)
+ ):
+ paint = path[-1].value
+ if paint.Format == PaintFormat.PaintGlyph:
+ transformation = reduce(
+ Transform.transform,
+ (st.value.getTransform() for st in path),
+ Identity,
+ )
+ glyphSet[paint.Glyph].draw(TransformPen(pen, transformation))
+
+ if pen.bounds is None:
+ return None
+
+ cb = ClipBox()
+ cb.Format = int(ClipBoxFormat.Static)
+ cb.xMin, cb.yMin, cb.xMax, cb.yMax = quantizeRect(pen.bounds, quantization)
+ return cb
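A hedged usage sketch of Paint.traverse above (again assuming a build with this change): depth-first walk of one base glyph's paint graph, collecting the glyphs referenced by PaintGlyph nodes. The font path is hypothetical.

from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.otTables import PaintFormat

font = TTFont("MyColorFont.ttf")  # hypothetical COLRv1 font
colr = font["COLR"].table
record = colr.BaseGlyphList.BaseGlyphPaintRecord[0]

referenced = []

def collect(paint):
    # Called once per node in the paint graph, depth-first.
    if paint.Format == PaintFormat.PaintGlyph:
        referenced.append(paint.Glyph)

record.Paint.traverse(colr, collect)
print(record.BaseGlyph, "->", referenced)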
# For each subtable format there is a class. However, we don't really distinguish
@@ -1595,30 +1712,82 @@ class Paint(getFormatSwitchingBaseTableClass("uint8")):
# subclass for each alternate field name.
#
_equivalents = {
- 'MarkArray': ("Mark1Array",),
- 'LangSys': ('DefaultLangSys',),
- 'Coverage': ('MarkCoverage', 'BaseCoverage', 'LigatureCoverage', 'Mark1Coverage',
- 'Mark2Coverage', 'BacktrackCoverage', 'InputCoverage',
- 'LookAheadCoverage', 'VertGlyphCoverage', 'HorizGlyphCoverage',
- 'TopAccentCoverage', 'ExtendedShapeCoverage', 'MathKernCoverage'),
- 'ClassDef': ('ClassDef1', 'ClassDef2', 'BacktrackClassDef', 'InputClassDef',
- 'LookAheadClassDef', 'GlyphClassDef', 'MarkAttachClassDef'),
- 'Anchor': ('EntryAnchor', 'ExitAnchor', 'BaseAnchor', 'LigatureAnchor',
- 'Mark2Anchor', 'MarkAnchor'),
- 'Device': ('XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice',
- 'XDeviceTable', 'YDeviceTable', 'DeviceTable'),
- 'Axis': ('HorizAxis', 'VertAxis',),
- 'MinMax': ('DefaultMinMax',),
- 'BaseCoord': ('MinCoord', 'MaxCoord',),
- 'JstfLangSys': ('DefJstfLangSys',),
- 'JstfGSUBModList': ('ShrinkageEnableGSUB', 'ShrinkageDisableGSUB', 'ExtensionEnableGSUB',
- 'ExtensionDisableGSUB',),
- 'JstfGPOSModList': ('ShrinkageEnableGPOS', 'ShrinkageDisableGPOS', 'ExtensionEnableGPOS',
- 'ExtensionDisableGPOS',),
- 'JstfMax': ('ShrinkageJstfMax', 'ExtensionJstfMax',),
- 'MathKern': ('TopRightMathKern', 'TopLeftMathKern', 'BottomRightMathKern',
- 'BottomLeftMathKern'),
- 'MathGlyphConstruction': ('VertGlyphConstruction', 'HorizGlyphConstruction'),
+ "MarkArray": ("Mark1Array",),
+ "LangSys": ("DefaultLangSys",),
+ "Coverage": (
+ "MarkCoverage",
+ "BaseCoverage",
+ "LigatureCoverage",
+ "Mark1Coverage",
+ "Mark2Coverage",
+ "BacktrackCoverage",
+ "InputCoverage",
+ "LookAheadCoverage",
+ "VertGlyphCoverage",
+ "HorizGlyphCoverage",
+ "TopAccentCoverage",
+ "ExtendedShapeCoverage",
+ "MathKernCoverage",
+ ),
+ "ClassDef": (
+ "ClassDef1",
+ "ClassDef2",
+ "BacktrackClassDef",
+ "InputClassDef",
+ "LookAheadClassDef",
+ "GlyphClassDef",
+ "MarkAttachClassDef",
+ ),
+ "Anchor": (
+ "EntryAnchor",
+ "ExitAnchor",
+ "BaseAnchor",
+ "LigatureAnchor",
+ "Mark2Anchor",
+ "MarkAnchor",
+ ),
+ "Device": (
+ "XPlaDevice",
+ "YPlaDevice",
+ "XAdvDevice",
+ "YAdvDevice",
+ "XDeviceTable",
+ "YDeviceTable",
+ "DeviceTable",
+ ),
+ "Axis": (
+ "HorizAxis",
+ "VertAxis",
+ ),
+ "MinMax": ("DefaultMinMax",),
+ "BaseCoord": (
+ "MinCoord",
+ "MaxCoord",
+ ),
+ "JstfLangSys": ("DefJstfLangSys",),
+ "JstfGSUBModList": (
+ "ShrinkageEnableGSUB",
+ "ShrinkageDisableGSUB",
+ "ExtensionEnableGSUB",
+ "ExtensionDisableGSUB",
+ ),
+ "JstfGPOSModList": (
+ "ShrinkageEnableGPOS",
+ "ShrinkageDisableGPOS",
+ "ExtensionEnableGPOS",
+ "ExtensionDisableGPOS",
+ ),
+ "JstfMax": (
+ "ShrinkageJstfMax",
+ "ExtensionJstfMax",
+ ),
+ "MathKern": (
+ "TopRightMathKern",
+ "TopLeftMathKern",
+ "BottomRightMathKern",
+ "BottomLeftMathKern",
+ ),
+ "MathGlyphConstruction": ("VertGlyphConstruction", "HorizGlyphConstruction"),
}
#
@@ -1626,468 +1795,479 @@ _equivalents = {
# XXX This should probably move to otBase.py
#
+
def fixLookupOverFlows(ttf, overflowRecord):
- """ Either the offset from the LookupList to a lookup overflowed, or
- an offset from a lookup to a subtable overflowed.
- The table layout is:
- GPSO/GUSB
- Script List
- Feature List
- LookUpList
- Lookup[0] and contents
- SubTable offset list
- SubTable[0] and contents
- ...
- SubTable[n] and contents
- ...
- Lookup[n] and contents
- SubTable offset list
- SubTable[0] and contents
- ...
- SubTable[n] and contents
- If the offset to a lookup overflowed (SubTableIndex is None)
- we must promote the *previous* lookup to an Extension type.
- If the offset from a lookup to subtable overflowed, then we must promote it
- to an Extension Lookup type.
- """
- ok = 0
- lookupIndex = overflowRecord.LookupListIndex
- if (overflowRecord.SubTableIndex is None):
- lookupIndex = lookupIndex - 1
- if lookupIndex < 0:
- return ok
- if overflowRecord.tableType == 'GSUB':
- extType = 7
- elif overflowRecord.tableType == 'GPOS':
- extType = 9
-
- lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup
- lookup = lookups[lookupIndex]
- # If the previous lookup is an extType, look further back. Very unlikely, but possible.
- while lookup.SubTable[0].__class__.LookupType == extType:
- lookupIndex = lookupIndex -1
- if lookupIndex < 0:
- return ok
- lookup = lookups[lookupIndex]
-
- for lookupIndex in range(lookupIndex, len(lookups)):
- lookup = lookups[lookupIndex]
- if lookup.LookupType != extType:
- lookup.LookupType = extType
- for si in range(len(lookup.SubTable)):
- subTable = lookup.SubTable[si]
- extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
- extSubTable = extSubTableClass()
- extSubTable.Format = 1
- extSubTable.ExtSubTable = subTable
- lookup.SubTable[si] = extSubTable
- ok = 1
- return ok
+ """Either the offset from the LookupList to a lookup overflowed, or
+ an offset from a lookup to a subtable overflowed.
+ The table layout is:
+ GPOS/GSUB
+ Script List
+ Feature List
+ LookupList
+ Lookup[0] and contents
+ SubTable offset list
+ SubTable[0] and contents
+ ...
+ SubTable[n] and contents
+ ...
+ Lookup[n] and contents
+ SubTable offset list
+ SubTable[0] and contents
+ ...
+ SubTable[n] and contents
+ If the offset to a lookup overflowed (SubTableIndex is None)
+ we must promote the *previous* lookup to an Extension type.
+ If the offset from a lookup to a subtable overflowed, then we must promote it
+ to an Extension Lookup type.
+ """
+ ok = 0
+ lookupIndex = overflowRecord.LookupListIndex
+ if overflowRecord.SubTableIndex is None:
+ lookupIndex = lookupIndex - 1
+ if lookupIndex < 0:
+ return ok
+ if overflowRecord.tableType == "GSUB":
+ extType = 7
+ elif overflowRecord.tableType == "GPOS":
+ extType = 9
+
+ lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup
+ lookup = lookups[lookupIndex]
+ # If the previous lookup is an extType, look further back. Very unlikely, but possible.
+ while lookup.SubTable[0].__class__.LookupType == extType:
+ lookupIndex = lookupIndex - 1
+ if lookupIndex < 0:
+ return ok
+ lookup = lookups[lookupIndex]
+
+ for lookupIndex in range(lookupIndex, len(lookups)):
+ lookup = lookups[lookupIndex]
+ if lookup.LookupType != extType:
+ lookup.LookupType = extType
+ for si in range(len(lookup.SubTable)):
+ subTable = lookup.SubTable[si]
+ extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
+ extSubTable = extSubTableClass()
+ extSubTable.Format = 1
+ extSubTable.ExtSubTable = subTable
+ lookup.SubTable[si] = extSubTable
+ ok = 1
+ return ok
+
def splitMultipleSubst(oldSubTable, newSubTable, overflowRecord):
- ok = 1
- oldMapping = sorted(oldSubTable.mapping.items())
- oldLen = len(oldMapping)
-
- if overflowRecord.itemName in ['Coverage', 'RangeRecord']:
- # Coverage table is written last. Overflow is to or within the
- # the coverage table. We will just cut the subtable in half.
- newLen = oldLen // 2
-
- elif overflowRecord.itemName == 'Sequence':
- # We just need to back up by two items from the overflowed
- # Sequence index to make sure the offset to the Coverage table
- # doesn't overflow.
- newLen = overflowRecord.itemIndex - 1
-
- newSubTable.mapping = {}
- for i in range(newLen, oldLen):
- item = oldMapping[i]
- key = item[0]
- newSubTable.mapping[key] = item[1]
- del oldSubTable.mapping[key]
-
- return ok
+ ok = 1
+ oldMapping = sorted(oldSubTable.mapping.items())
+ oldLen = len(oldMapping)
+
+ if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
+ # Coverage table is written last. Overflow is to or within
+ # the coverage table. We will just cut the subtable in half.
+ newLen = oldLen // 2
+
+ elif overflowRecord.itemName == "Sequence":
+ # We just need to back up by two items from the overflowed
+ # Sequence index to make sure the offset to the Coverage table
+ # doesn't overflow.
+ newLen = overflowRecord.itemIndex - 1
+
+ newSubTable.mapping = {}
+ for i in range(newLen, oldLen):
+ item = oldMapping[i]
+ key = item[0]
+ newSubTable.mapping[key] = item[1]
+ del oldSubTable.mapping[key]
+
+ return ok
+
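Editorial note: the split* functions above and below all share the same halving strategy when the Coverage offset overflows: sort the mapping, keep the bottom half in the old subtable, and move the top half into the new one. The same idea in isolation, with hypothetical glyph names:

oldMapping = {"a": ["a.alt"], "b": ["b.alt"], "c": ["c.alt"], "d": ["d.alt"]}
items = sorted(oldMapping.items())
newLen = len(items) // 2
newMapping = dict(items[newLen:])   # top half goes to the new subtable
for key in newMapping:
    del oldMapping[key]             # and is removed from the old one
assert oldMapping == {"a": ["a.alt"], "b": ["b.alt"]}
assert newMapping == {"c": ["c.alt"], "d": ["d.alt"]}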
def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord):
- ok = 1
- if hasattr(oldSubTable, 'sortCoverageLast'):
- newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast
+ ok = 1
+ if hasattr(oldSubTable, "sortCoverageLast"):
+ newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast
- oldAlts = sorted(oldSubTable.alternates.items())
- oldLen = len(oldAlts)
+ oldAlts = sorted(oldSubTable.alternates.items())
+ oldLen = len(oldAlts)
- if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']:
- # Coverage table is written last. overflow is to or within the
- # the coverage table. We will just cut the subtable in half.
- newLen = oldLen//2
+ if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
+ # Coverage table is written last. Overflow is to or within
+ # the coverage table. We will just cut the subtable in half.
+ newLen = oldLen // 2
- elif overflowRecord.itemName == 'AlternateSet':
- # We just need to back up by two items
- # from the overflowed AlternateSet index to make sure the offset
- # to the Coverage table doesn't overflow.
- newLen = overflowRecord.itemIndex - 1
+ elif overflowRecord.itemName == "AlternateSet":
+ # We just need to back up by two items
+ # from the overflowed AlternateSet index to make sure the offset
+ # to the Coverage table doesn't overflow.
+ newLen = overflowRecord.itemIndex - 1
- newSubTable.alternates = {}
- for i in range(newLen, oldLen):
- item = oldAlts[i]
- key = item[0]
- newSubTable.alternates[key] = item[1]
- del oldSubTable.alternates[key]
+ newSubTable.alternates = {}
+ for i in range(newLen, oldLen):
+ item = oldAlts[i]
+ key = item[0]
+ newSubTable.alternates[key] = item[1]
+ del oldSubTable.alternates[key]
- return ok
+ return ok
def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord):
- ok = 1
- oldLigs = sorted(oldSubTable.ligatures.items())
- oldLen = len(oldLigs)
+ ok = 1
+ oldLigs = sorted(oldSubTable.ligatures.items())
+ oldLen = len(oldLigs)
- if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']:
- # Coverage table is written last. overflow is to or within the
- # the coverage table. We will just cut the subtable in half.
- newLen = oldLen//2
+ if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
+ # Coverage table is written last. Overflow is to or within
+ # the coverage table. We will just cut the subtable in half.
+ newLen = oldLen // 2
- elif overflowRecord.itemName == 'LigatureSet':
- # We just need to back up by two items
- # from the overflowed AlternateSet index to make sure the offset
- # to the Coverage table doesn't overflow.
- newLen = overflowRecord.itemIndex - 1
+ elif overflowRecord.itemName == "LigatureSet":
+ # We just need to back up by two items
+ # from the overflowed LigatureSet index to make sure the offset
+ # to the Coverage table doesn't overflow.
+ newLen = overflowRecord.itemIndex - 1
- newSubTable.ligatures = {}
- for i in range(newLen, oldLen):
- item = oldLigs[i]
- key = item[0]
- newSubTable.ligatures[key] = item[1]
- del oldSubTable.ligatures[key]
+ newSubTable.ligatures = {}
+ for i in range(newLen, oldLen):
+ item = oldLigs[i]
+ key = item[0]
+ newSubTable.ligatures[key] = item[1]
+ del oldSubTable.ligatures[key]
- return ok
+ return ok
def splitPairPos(oldSubTable, newSubTable, overflowRecord):
- st = oldSubTable
- ok = False
- newSubTable.Format = oldSubTable.Format
- if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1:
- for name in 'ValueFormat1', 'ValueFormat2':
- setattr(newSubTable, name, getattr(oldSubTable, name))
+ st = oldSubTable
+ ok = False
+ newSubTable.Format = oldSubTable.Format
+ if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1:
+ for name in "ValueFormat1", "ValueFormat2":
+ setattr(newSubTable, name, getattr(oldSubTable, name))
- # Move top half of coverage to new subtable
+ # Move top half of coverage to new subtable
- newSubTable.Coverage = oldSubTable.Coverage.__class__()
+ newSubTable.Coverage = oldSubTable.Coverage.__class__()
- coverage = oldSubTable.Coverage.glyphs
- records = oldSubTable.PairSet
+ coverage = oldSubTable.Coverage.glyphs
+ records = oldSubTable.PairSet
- oldCount = len(oldSubTable.PairSet) // 2
+ oldCount = len(oldSubTable.PairSet) // 2
- oldSubTable.Coverage.glyphs = coverage[:oldCount]
- oldSubTable.PairSet = records[:oldCount]
+ oldSubTable.Coverage.glyphs = coverage[:oldCount]
+ oldSubTable.PairSet = records[:oldCount]
- newSubTable.Coverage.glyphs = coverage[oldCount:]
- newSubTable.PairSet = records[oldCount:]
+ newSubTable.Coverage.glyphs = coverage[oldCount:]
+ newSubTable.PairSet = records[oldCount:]
- oldSubTable.PairSetCount = len(oldSubTable.PairSet)
- newSubTable.PairSetCount = len(newSubTable.PairSet)
+ oldSubTable.PairSetCount = len(oldSubTable.PairSet)
+ newSubTable.PairSetCount = len(newSubTable.PairSet)
- ok = True
+ ok = True
- elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1:
- if not hasattr(oldSubTable, 'Class2Count'):
- oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record)
- for name in 'Class2Count', 'ClassDef2', 'ValueFormat1', 'ValueFormat2':
- setattr(newSubTable, name, getattr(oldSubTable, name))
+ elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1:
+ if not hasattr(oldSubTable, "Class2Count"):
+ oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record)
+ for name in "Class2Count", "ClassDef2", "ValueFormat1", "ValueFormat2":
+ setattr(newSubTable, name, getattr(oldSubTable, name))
- # The two subtables will still have the same ClassDef2 and the table
- # sharing will still cause the sharing to overflow. As such, disable
- # sharing on the one that is serialized second (that's oldSubTable).
- oldSubTable.DontShare = True
+ # The two subtables will still have the same ClassDef2 and the table
+ # sharing will still cause the sharing to overflow. As such, disable
+ # sharing on the one that is serialized second (that's oldSubTable).
+ oldSubTable.DontShare = True
- # Move top half of class numbers to new subtable
+ # Move top half of class numbers to new subtable
- newSubTable.Coverage = oldSubTable.Coverage.__class__()
- newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__()
+ newSubTable.Coverage = oldSubTable.Coverage.__class__()
+ newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__()
- coverage = oldSubTable.Coverage.glyphs
- classDefs = oldSubTable.ClassDef1.classDefs
- records = oldSubTable.Class1Record
+ coverage = oldSubTable.Coverage.glyphs
+ classDefs = oldSubTable.ClassDef1.classDefs
+ records = oldSubTable.Class1Record
- oldCount = len(oldSubTable.Class1Record) // 2
- newGlyphs = set(k for k,v in classDefs.items() if v >= oldCount)
+ oldCount = len(oldSubTable.Class1Record) // 2
+ newGlyphs = set(k for k, v in classDefs.items() if v >= oldCount)
- oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs]
- oldSubTable.ClassDef1.classDefs = {k:v for k,v in classDefs.items() if v < oldCount}
- oldSubTable.Class1Record = records[:oldCount]
+ oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs]
+ oldSubTable.ClassDef1.classDefs = {
+ k: v for k, v in classDefs.items() if v < oldCount
+ }
+ oldSubTable.Class1Record = records[:oldCount]
- newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs]
- newSubTable.ClassDef1.classDefs = {k:(v-oldCount) for k,v in classDefs.items() if v > oldCount}
- newSubTable.Class1Record = records[oldCount:]
+ newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs]
+ newSubTable.ClassDef1.classDefs = {
+ k: (v - oldCount) for k, v in classDefs.items() if v > oldCount
+ }
+ newSubTable.Class1Record = records[oldCount:]
- oldSubTable.Class1Count = len(oldSubTable.Class1Record)
- newSubTable.Class1Count = len(newSubTable.Class1Record)
+ oldSubTable.Class1Count = len(oldSubTable.Class1Record)
+ newSubTable.Class1Count = len(newSubTable.Class1Record)
- ok = True
+ ok = True
- return ok
+ return ok
def splitMarkBasePos(oldSubTable, newSubTable, overflowRecord):
- # split half of the mark classes to the new subtable
- classCount = oldSubTable.ClassCount
- if classCount < 2:
- # oh well, not much left to split...
- return False
-
- oldClassCount = classCount // 2
- newClassCount = classCount - oldClassCount
-
- oldMarkCoverage, oldMarkRecords = [], []
- newMarkCoverage, newMarkRecords = [], []
- for glyphName, markRecord in zip(
- oldSubTable.MarkCoverage.glyphs,
- oldSubTable.MarkArray.MarkRecord
- ):
- if markRecord.Class < oldClassCount:
- oldMarkCoverage.append(glyphName)
- oldMarkRecords.append(markRecord)
- else:
- markRecord.Class -= oldClassCount
- newMarkCoverage.append(glyphName)
- newMarkRecords.append(markRecord)
-
- oldBaseRecords, newBaseRecords = [], []
- for rec in oldSubTable.BaseArray.BaseRecord:
- oldBaseRecord, newBaseRecord = rec.__class__(), rec.__class__()
- oldBaseRecord.BaseAnchor = rec.BaseAnchor[:oldClassCount]
- newBaseRecord.BaseAnchor = rec.BaseAnchor[oldClassCount:]
- oldBaseRecords.append(oldBaseRecord)
- newBaseRecords.append(newBaseRecord)
-
- newSubTable.Format = oldSubTable.Format
-
- oldSubTable.MarkCoverage.glyphs = oldMarkCoverage
- newSubTable.MarkCoverage = oldSubTable.MarkCoverage.__class__()
- newSubTable.MarkCoverage.glyphs = newMarkCoverage
-
- # share the same BaseCoverage in both halves
- newSubTable.BaseCoverage = oldSubTable.BaseCoverage
-
- oldSubTable.ClassCount = oldClassCount
- newSubTable.ClassCount = newClassCount
-
- oldSubTable.MarkArray.MarkRecord = oldMarkRecords
- newSubTable.MarkArray = oldSubTable.MarkArray.__class__()
- newSubTable.MarkArray.MarkRecord = newMarkRecords
-
- oldSubTable.MarkArray.MarkCount = len(oldMarkRecords)
- newSubTable.MarkArray.MarkCount = len(newMarkRecords)
-
- oldSubTable.BaseArray.BaseRecord = oldBaseRecords
- newSubTable.BaseArray = oldSubTable.BaseArray.__class__()
- newSubTable.BaseArray.BaseRecord = newBaseRecords
-
- oldSubTable.BaseArray.BaseCount = len(oldBaseRecords)
- newSubTable.BaseArray.BaseCount = len(newBaseRecords)
-
- return True
-
-
-splitTable = { 'GSUB': {
-# 1: splitSingleSubst,
- 2: splitMultipleSubst,
- 3: splitAlternateSubst,
- 4: splitLigatureSubst,
-# 5: splitContextSubst,
-# 6: splitChainContextSubst,
-# 7: splitExtensionSubst,
-# 8: splitReverseChainSingleSubst,
- },
- 'GPOS': {
-# 1: splitSinglePos,
- 2: splitPairPos,
-# 3: splitCursivePos,
- 4: splitMarkBasePos,
-# 5: splitMarkLigPos,
-# 6: splitMarkMarkPos,
-# 7: splitContextPos,
-# 8: splitChainContextPos,
-# 9: splitExtensionPos,
- }
-
- }
+ # split half of the mark classes to the new subtable
+ classCount = oldSubTable.ClassCount
+ if classCount < 2:
+ # oh well, not much left to split...
+ return False
+
+ oldClassCount = classCount // 2
+ newClassCount = classCount - oldClassCount
+
+ oldMarkCoverage, oldMarkRecords = [], []
+ newMarkCoverage, newMarkRecords = [], []
+ for glyphName, markRecord in zip(
+ oldSubTable.MarkCoverage.glyphs, oldSubTable.MarkArray.MarkRecord
+ ):
+ if markRecord.Class < oldClassCount:
+ oldMarkCoverage.append(glyphName)
+ oldMarkRecords.append(markRecord)
+ else:
+ markRecord.Class -= oldClassCount
+ newMarkCoverage.append(glyphName)
+ newMarkRecords.append(markRecord)
+
+ oldBaseRecords, newBaseRecords = [], []
+ for rec in oldSubTable.BaseArray.BaseRecord:
+ oldBaseRecord, newBaseRecord = rec.__class__(), rec.__class__()
+ oldBaseRecord.BaseAnchor = rec.BaseAnchor[:oldClassCount]
+ newBaseRecord.BaseAnchor = rec.BaseAnchor[oldClassCount:]
+ oldBaseRecords.append(oldBaseRecord)
+ newBaseRecords.append(newBaseRecord)
+
+ newSubTable.Format = oldSubTable.Format
+
+ oldSubTable.MarkCoverage.glyphs = oldMarkCoverage
+ newSubTable.MarkCoverage = oldSubTable.MarkCoverage.__class__()
+ newSubTable.MarkCoverage.glyphs = newMarkCoverage
+
+ # share the same BaseCoverage in both halves
+ newSubTable.BaseCoverage = oldSubTable.BaseCoverage
+
+ oldSubTable.ClassCount = oldClassCount
+ newSubTable.ClassCount = newClassCount
+
+ oldSubTable.MarkArray.MarkRecord = oldMarkRecords
+ newSubTable.MarkArray = oldSubTable.MarkArray.__class__()
+ newSubTable.MarkArray.MarkRecord = newMarkRecords
+
+ oldSubTable.MarkArray.MarkCount = len(oldMarkRecords)
+ newSubTable.MarkArray.MarkCount = len(newMarkRecords)
+
+ oldSubTable.BaseArray.BaseRecord = oldBaseRecords
+ newSubTable.BaseArray = oldSubTable.BaseArray.__class__()
+ newSubTable.BaseArray.BaseRecord = newBaseRecords
+
+ oldSubTable.BaseArray.BaseCount = len(oldBaseRecords)
+ newSubTable.BaseArray.BaseCount = len(newBaseRecords)
+
+ return True
+
+
+splitTable = {
+ "GSUB": {
+ # 1: splitSingleSubst,
+ 2: splitMultipleSubst,
+ 3: splitAlternateSubst,
+ 4: splitLigatureSubst,
+ # 5: splitContextSubst,
+ # 6: splitChainContextSubst,
+ # 7: splitExtensionSubst,
+ # 8: splitReverseChainSingleSubst,
+ },
+ "GPOS": {
+ # 1: splitSinglePos,
+ 2: splitPairPos,
+ # 3: splitCursivePos,
+ 4: splitMarkBasePos,
+ # 5: splitMarkLigPos,
+ # 6: splitMarkMarkPos,
+ # 7: splitContextPos,
+ # 8: splitChainContextPos,
+ # 9: splitExtensionPos,
+ },
+}
+
def fixSubTableOverFlows(ttf, overflowRecord):
- """
- An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts.
- """
- table = ttf[overflowRecord.tableType].table
- lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
- subIndex = overflowRecord.SubTableIndex
- subtable = lookup.SubTable[subIndex]
-
- # First, try not sharing anything for this subtable...
- if not hasattr(subtable, "DontShare"):
- subtable.DontShare = True
- return True
-
- if hasattr(subtable, 'ExtSubTable'):
- # We split the subtable of the Extension table, and add a new Extension table
- # to contain the new subtable.
-
- subTableType = subtable.ExtSubTable.__class__.LookupType
- extSubTable = subtable
- subtable = extSubTable.ExtSubTable
- newExtSubTableClass = lookupTypes[overflowRecord.tableType][extSubTable.__class__.LookupType]
- newExtSubTable = newExtSubTableClass()
- newExtSubTable.Format = extSubTable.Format
- toInsert = newExtSubTable
-
- newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
- newSubTable = newSubTableClass()
- newExtSubTable.ExtSubTable = newSubTable
- else:
- subTableType = subtable.__class__.LookupType
- newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
- newSubTable = newSubTableClass()
- toInsert = newSubTable
-
- if hasattr(lookup, 'SubTableCount'): # may not be defined yet.
- lookup.SubTableCount = lookup.SubTableCount + 1
-
- try:
- splitFunc = splitTable[overflowRecord.tableType][subTableType]
- except KeyError:
- log.error(
- "Don't know how to split %s lookup type %s",
- overflowRecord.tableType,
- subTableType,
- )
- return False
-
- ok = splitFunc(subtable, newSubTable, overflowRecord)
- if ok:
- lookup.SubTable.insert(subIndex + 1, toInsert)
- return ok
+ """
+    An offset has overflowed within a subtable. We need to divide this subtable into smaller parts.
+ """
+ table = ttf[overflowRecord.tableType].table
+ lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
+ subIndex = overflowRecord.SubTableIndex
+ subtable = lookup.SubTable[subIndex]
+
+ # First, try not sharing anything for this subtable...
+ if not hasattr(subtable, "DontShare"):
+ subtable.DontShare = True
+ return True
+
+ if hasattr(subtable, "ExtSubTable"):
+ # We split the subtable of the Extension table, and add a new Extension table
+ # to contain the new subtable.
+
+ subTableType = subtable.ExtSubTable.__class__.LookupType
+ extSubTable = subtable
+ subtable = extSubTable.ExtSubTable
+ newExtSubTableClass = lookupTypes[overflowRecord.tableType][
+ extSubTable.__class__.LookupType
+ ]
+ newExtSubTable = newExtSubTableClass()
+ newExtSubTable.Format = extSubTable.Format
+ toInsert = newExtSubTable
+
+ newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
+ newSubTable = newSubTableClass()
+ newExtSubTable.ExtSubTable = newSubTable
+ else:
+ subTableType = subtable.__class__.LookupType
+ newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
+ newSubTable = newSubTableClass()
+ toInsert = newSubTable
+
+ if hasattr(lookup, "SubTableCount"): # may not be defined yet.
+ lookup.SubTableCount = lookup.SubTableCount + 1
+
+ try:
+ splitFunc = splitTable[overflowRecord.tableType][subTableType]
+ except KeyError:
+ log.error(
+ "Don't know how to split %s lookup type %s",
+ overflowRecord.tableType,
+ subTableType,
+ )
+ return False
+
+ ok = splitFunc(subtable, newSubTable, overflowRecord)
+ if ok:
+ lookup.SubTable.insert(subIndex + 1, toInsert)
+ return ok
+
# End of OverFlow logic
def _buildClasses():
- import re
- from .otData import otData
-
- formatPat = re.compile(r"([A-Za-z0-9]+)Format(\d+)$")
- namespace = globals()
-
- # populate module with classes
- for name, table in otData:
- baseClass = BaseTable
- m = formatPat.match(name)
- if m:
- # XxxFormatN subtable, we only add the "base" table
- name = m.group(1)
- # the first row of a format-switching otData table describes the Format;
- # the first column defines the type of the Format field.
- # Currently this can be either 'uint16' or 'uint8'.
- formatType = table[0][0]
- baseClass = getFormatSwitchingBaseTableClass(formatType)
- if name not in namespace:
- # the class doesn't exist yet, so the base implementation is used.
- cls = type(name, (baseClass,), {})
- if name in ('GSUB', 'GPOS'):
- cls.DontShare = True
- namespace[name] = cls
-
- # link Var{Table} <-> {Table} (e.g. ColorStop <-> VarColorStop, etc.)
- for name, _ in otData:
- if name.startswith("Var") and len(name) > 3 and name[3:] in namespace:
- varType = namespace[name]
- noVarType = namespace[name[3:]]
- varType.NoVarType = noVarType
- noVarType.VarType = varType
-
- for base, alts in _equivalents.items():
- base = namespace[base]
- for alt in alts:
- namespace[alt] = base
-
- global lookupTypes
- lookupTypes = {
- 'GSUB': {
- 1: SingleSubst,
- 2: MultipleSubst,
- 3: AlternateSubst,
- 4: LigatureSubst,
- 5: ContextSubst,
- 6: ChainContextSubst,
- 7: ExtensionSubst,
- 8: ReverseChainSingleSubst,
- },
- 'GPOS': {
- 1: SinglePos,
- 2: PairPos,
- 3: CursivePos,
- 4: MarkBasePos,
- 5: MarkLigPos,
- 6: MarkMarkPos,
- 7: ContextPos,
- 8: ChainContextPos,
- 9: ExtensionPos,
- },
- 'mort': {
- 4: NoncontextualMorph,
- },
- 'morx': {
- 0: RearrangementMorph,
- 1: ContextualMorph,
- 2: LigatureMorph,
- # 3: Reserved,
- 4: NoncontextualMorph,
- 5: InsertionMorph,
- },
- }
- lookupTypes['JSTF'] = lookupTypes['GPOS'] # JSTF contains GPOS
- for lookupEnum in lookupTypes.values():
- for enum, cls in lookupEnum.items():
- cls.LookupType = enum
-
- global featureParamTypes
- featureParamTypes = {
- 'size': FeatureParamsSize,
- }
- for i in range(1, 20+1):
- featureParamTypes['ss%02d' % i] = FeatureParamsStylisticSet
- for i in range(1, 99+1):
- featureParamTypes['cv%02d' % i] = FeatureParamsCharacterVariants
-
- # add converters to classes
- from .otConverters import buildConverters
- for name, table in otData:
- m = formatPat.match(name)
- if m:
- # XxxFormatN subtable, add converter to "base" table
- name, format = m.groups()
- format = int(format)
- cls = namespace[name]
- if not hasattr(cls, "converters"):
- cls.converters = {}
- cls.convertersByName = {}
- converters, convertersByName = buildConverters(table[1:], namespace)
- cls.converters[format] = converters
- cls.convertersByName[format] = convertersByName
- # XXX Add staticSize?
- else:
- cls = namespace[name]
- cls.converters, cls.convertersByName = buildConverters(table, namespace)
- # XXX Add staticSize?
+ import re
+ from .otData import otData
+
+ formatPat = re.compile(r"([A-Za-z0-9]+)Format(\d+)$")
+ namespace = globals()
+
+ # populate module with classes
+ for name, table in otData:
+ baseClass = BaseTable
+ m = formatPat.match(name)
+ if m:
+ # XxxFormatN subtable, we only add the "base" table
+ name = m.group(1)
+ # the first row of a format-switching otData table describes the Format;
+ # the first column defines the type of the Format field.
+ # Currently this can be either 'uint16' or 'uint8'.
+ formatType = table[0][0]
+ baseClass = getFormatSwitchingBaseTableClass(formatType)
+ if name not in namespace:
+ # the class doesn't exist yet, so the base implementation is used.
+ cls = type(name, (baseClass,), {})
+ if name in ("GSUB", "GPOS"):
+ cls.DontShare = True
+ namespace[name] = cls
+
+ # link Var{Table} <-> {Table} (e.g. ColorStop <-> VarColorStop, etc.)
+ for name, _ in otData:
+ if name.startswith("Var") and len(name) > 3 and name[3:] in namespace:
+ varType = namespace[name]
+ noVarType = namespace[name[3:]]
+ varType.NoVarType = noVarType
+ noVarType.VarType = varType
+
+ for base, alts in _equivalents.items():
+ base = namespace[base]
+ for alt in alts:
+ namespace[alt] = base
+
+ global lookupTypes
+ lookupTypes = {
+ "GSUB": {
+ 1: SingleSubst,
+ 2: MultipleSubst,
+ 3: AlternateSubst,
+ 4: LigatureSubst,
+ 5: ContextSubst,
+ 6: ChainContextSubst,
+ 7: ExtensionSubst,
+ 8: ReverseChainSingleSubst,
+ },
+ "GPOS": {
+ 1: SinglePos,
+ 2: PairPos,
+ 3: CursivePos,
+ 4: MarkBasePos,
+ 5: MarkLigPos,
+ 6: MarkMarkPos,
+ 7: ContextPos,
+ 8: ChainContextPos,
+ 9: ExtensionPos,
+ },
+ "mort": {
+ 4: NoncontextualMorph,
+ },
+ "morx": {
+ 0: RearrangementMorph,
+ 1: ContextualMorph,
+ 2: LigatureMorph,
+ # 3: Reserved,
+ 4: NoncontextualMorph,
+ 5: InsertionMorph,
+ },
+ }
+ lookupTypes["JSTF"] = lookupTypes["GPOS"] # JSTF contains GPOS
+ for lookupEnum in lookupTypes.values():
+ for enum, cls in lookupEnum.items():
+ cls.LookupType = enum
+
+ global featureParamTypes
+ featureParamTypes = {
+ "size": FeatureParamsSize,
+ }
+ for i in range(1, 20 + 1):
+ featureParamTypes["ss%02d" % i] = FeatureParamsStylisticSet
+ for i in range(1, 99 + 1):
+ featureParamTypes["cv%02d" % i] = FeatureParamsCharacterVariants
+
+ # add converters to classes
+ from .otConverters import buildConverters
+
+ for name, table in otData:
+ m = formatPat.match(name)
+ if m:
+ # XxxFormatN subtable, add converter to "base" table
+ name, format = m.groups()
+ format = int(format)
+ cls = namespace[name]
+ if not hasattr(cls, "converters"):
+ cls.converters = {}
+ cls.convertersByName = {}
+ converters, convertersByName = buildConverters(table[1:], namespace)
+ cls.converters[format] = converters
+ cls.convertersByName[format] = convertersByName
+ # XXX Add staticSize?
+ else:
+ cls = namespace[name]
+ cls.converters, cls.convertersByName = buildConverters(table, namespace)
+ # XXX Add staticSize?
_buildClasses()
def _getGlyphsFromCoverageTable(coverage):
- if coverage is None:
- # empty coverage table
- return []
- else:
- return coverage.glyphs
+ if coverage is None:
+ # empty coverage table
+ return []
+ else:
+ return coverage.glyphs
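
A minimal sketch of exercising the overflow machinery above by hand (not part of the patch; the packer normally calls fixSubTableOverFlows itself when compiling GSUB/GPOS raises an offset overflow). The font path is hypothetical, and the SimpleNamespace is only a stand-in for the real overflow record, carrying just the three attributes the function reads:

    from types import SimpleNamespace
    from fontTools.ttLib import TTFont
    from fontTools.ttLib.tables.otTables import fixSubTableOverFlows

    font = TTFont("MyFont.ttf")   # hypothetical path
    font["GPOS"].table            # make sure the table is decompiled

    # Stand-in overflow record; only these attributes are consulted.
    record = SimpleNamespace(tableType="GPOS", LookupListIndex=0, SubTableIndex=0)

    # The first call per subtable just sets DontShare and returns True; a
    # later call for the same subtable attempts a real split via splitTable.
    if not fixSubTableOverFlows(font, record):
        raise RuntimeError("could not split the offending subtable")
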
diff --git a/Lib/fontTools/ttLib/tables/otTraverse.py b/Lib/fontTools/ttLib/tables/otTraverse.py
index 40b28b2b..bf22dcfd 100644
--- a/Lib/fontTools/ttLib/tables/otTraverse.py
+++ b/Lib/fontTools/ttLib/tables/otTraverse.py
@@ -12,7 +12,6 @@ __all__ = [
class SubTablePath(Tuple[BaseTable.SubTableEntry, ...]):
-
def __str__(self) -> str:
path_parts = []
for entry in self:
@@ -32,6 +31,9 @@ def dfs_base_table(
root_accessor: Optional[str] = None,
skip_root: bool = False,
predicate: Optional[Callable[[SubTablePath], bool]] = None,
+ iter_subtables_fn: Optional[
+ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
+ ] = None,
) -> Iterable[SubTablePath]:
"""Depth-first search tree of BaseTables.
@@ -44,6 +46,9 @@ def dfs_base_table(
predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out
paths. If True, the path is yielded and its subtables are added to the
queue. If False, the path is skipped and its subtables are not traversed.
+ iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
+ function to iterate over subtables of a table. If None, the default
+ BaseTable.iterSubTables() is used.
Yields:
SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples
@@ -57,6 +62,7 @@ def dfs_base_table(
skip_root,
predicate,
lambda frontier, new: frontier.extendleft(reversed(new)),
+ iter_subtables_fn,
)
@@ -65,11 +71,14 @@ def bfs_base_table(
root_accessor: Optional[str] = None,
skip_root: bool = False,
predicate: Optional[Callable[[SubTablePath], bool]] = None,
+ iter_subtables_fn: Optional[
+ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
+ ] = None,
) -> Iterable[SubTablePath]:
"""Breadth-first search tree of BaseTables.
Args:
- root (BaseTable): the root of the tree.
+        root (BaseTable): the root of the tree.
root_accessor (Optional[str]): attribute name for the root table, if any (mostly
useful for debugging).
skip_root (Optional[bool]): if True, the root itself is not visited, only its
@@ -77,6 +86,9 @@ def bfs_base_table(
predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out
paths. If True, the path is yielded and its subtables are added to the
queue. If False, the path is skipped and its subtables are not traversed.
+ iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
+ function to iterate over subtables of a table. If None, the default
+ BaseTable.iterSubTables() is used.
Yields:
SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples
@@ -90,6 +102,7 @@ def bfs_base_table(
skip_root,
predicate,
lambda frontier, new: frontier.extend(new),
+ iter_subtables_fn,
)
@@ -99,6 +112,9 @@ def _traverse_ot_data(
skip_root: bool,
predicate: Optional[Callable[[SubTablePath], bool]],
add_to_frontier_fn: AddToFrontierFn,
+ iter_subtables_fn: Optional[
+ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
+ ] = None,
) -> Iterable[SubTablePath]:
# no visited because general otData cannot cycle (forward-offset only)
if root_accessor is None:
@@ -109,6 +125,11 @@ def _traverse_ot_data(
def predicate(path):
return True
+ if iter_subtables_fn is None:
+
+ def iter_subtables_fn(table):
+ return table.iterSubTables()
+
frontier: Deque[SubTablePath] = deque()
root_entry = BaseTable.SubTableEntry(root_accessor, root)
@@ -117,7 +138,10 @@ def _traverse_ot_data(
else:
add_to_frontier_fn(
frontier,
- [(root_entry, subtable_entry) for subtable_entry in root.iterSubTables()],
+ [
+ (root_entry, subtable_entry)
+ for subtable_entry in iter_subtables_fn(root)
+ ],
)
while frontier:
@@ -131,7 +155,7 @@ def _traverse_ot_data(
yield SubTablePath(path)
new_entries = [
- path + (subtable_entry,) for subtable_entry in current.iterSubTables()
+ path + (subtable_entry,) for subtable_entry in iter_subtables_fn(current)
]
add_to_frontier_fn(frontier, new_entries)
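
A short usage sketch for the new iter_subtables_fn hook (illustrative only; the font path and the filter are made up for this example):

    from fontTools.ttLib import TTFont
    from fontTools.ttLib.tables.otTraverse import dfs_base_table

    font = TTFont("MyFont.ttf")   # hypothetical path
    gsub = font["GSUB"].table

    def skip_coverage(table):
        # Visit every subtable except Coverage tables.
        return (entry for entry in table.iterSubTables() if entry.name != "Coverage")

    for path in dfs_base_table(gsub, root_accessor="GSUB", iter_subtables_fn=skip_coverage):
        print(path)   # SubTablePath.__str__ renders the access path
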
diff --git a/Lib/fontTools/ttLib/tables/sbixGlyph.py b/Lib/fontTools/ttLib/tables/sbixGlyph.py
index fe29c090..fd687a18 100644
--- a/Lib/fontTools/ttLib/tables/sbixGlyph.py
+++ b/Lib/fontTools/ttLib/tables/sbixGlyph.py
@@ -20,98 +20,126 @@ sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat)
class Glyph(object):
- def __init__(self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0):
- self.gid = gid
- self.glyphName = glyphName
- self.referenceGlyphName = referenceGlyphName
- self.originOffsetX = originOffsetX
- self.originOffsetY = originOffsetY
- self.rawdata = rawdata
- self.graphicType = graphicType
- self.imageData = imageData
-
- # fix self.graphicType if it is null terminated or too short
- if self.graphicType is not None:
- if self.graphicType[-1] == "\0":
- self.graphicType = self.graphicType[:-1]
- if len(self.graphicType) > 4:
- from fontTools import ttLib
- raise ttLib.TTLibError("Glyph.graphicType must not be longer than 4 characters.")
- elif len(self.graphicType) < 4:
- # pad with spaces
- self.graphicType += " "[:(4 - len(self.graphicType))]
-
- def decompile(self, ttFont):
- self.glyphName = ttFont.getGlyphName(self.gid)
- if self.rawdata is None:
- from fontTools import ttLib
- raise ttLib.TTLibError("No table data to decompile")
- if len(self.rawdata) > 0:
- if len(self.rawdata) < sbixGlyphHeaderFormatSize:
- from fontTools import ttLib
- #print "Glyph %i header too short: Expected %x, got %x." % (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata))
- raise ttLib.TTLibError("Glyph header too short.")
-
- sstruct.unpack(sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self)
-
- if self.graphicType == "dupe":
- # this glyph is a reference to another glyph's image data
- gid, = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:])
- self.referenceGlyphName = ttFont.getGlyphName(gid)
- else:
- self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:]
- self.referenceGlyphName = None
- # clean up
- del self.rawdata
- del self.gid
-
- def compile(self, ttFont):
- if self.glyphName is None:
- from fontTools import ttLib
- raise ttLib.TTLibError("Can't compile Glyph without glyph name")
- # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index?
- # (needed if you just want to compile the sbix table on its own)
- self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName))
- if self.graphicType is None:
- self.rawdata = b""
- else:
- self.rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + self.imageData
-
- def toXML(self, xmlWriter, ttFont):
- if self.graphicType == None:
- # TODO: ignore empty glyphs?
- # a glyph data entry is required for each glyph,
- # but empty ones can be calculated at compile time
- xmlWriter.simpletag("glyph", name=self.glyphName)
- xmlWriter.newline()
- return
- xmlWriter.begintag("glyph",
- graphicType=self.graphicType,
- name=self.glyphName,
- originOffsetX=self.originOffsetX,
- originOffsetY=self.originOffsetY,
- )
- xmlWriter.newline()
- if self.graphicType == "dupe":
- # graphicType == "dupe" is a reference to another glyph id.
- xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
- else:
- xmlWriter.begintag("hexdata")
- xmlWriter.newline()
- xmlWriter.dumphex(self.imageData)
- xmlWriter.endtag("hexdata")
- xmlWriter.newline()
- xmlWriter.endtag("glyph")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "ref":
- # glyph is a "dupe", i.e. a reference to another glyph's image data.
- # in this case imageData contains the glyph id of the reference glyph
- # get glyph id from glyphname
- self.imageData = struct.pack(">H", ttFont.getGlyphID(safeEval("'''" + attrs["glyphname"] + "'''")))
- elif name == "hexdata":
- self.imageData = readHex(content)
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("can't handle '%s' element" % name)
+ def __init__(
+ self,
+ glyphName=None,
+ referenceGlyphName=None,
+ originOffsetX=0,
+ originOffsetY=0,
+ graphicType=None,
+ imageData=None,
+ rawdata=None,
+ gid=0,
+ ):
+ self.gid = gid
+ self.glyphName = glyphName
+ self.referenceGlyphName = referenceGlyphName
+ self.originOffsetX = originOffsetX
+ self.originOffsetY = originOffsetY
+ self.rawdata = rawdata
+ self.graphicType = graphicType
+ self.imageData = imageData
+
+ # fix self.graphicType if it is null terminated or too short
+ if self.graphicType is not None:
+ if self.graphicType[-1] == "\0":
+ self.graphicType = self.graphicType[:-1]
+ if len(self.graphicType) > 4:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError(
+ "Glyph.graphicType must not be longer than 4 characters."
+ )
+ elif len(self.graphicType) < 4:
+ # pad with spaces
+                self.graphicType += " " * (4 - len(self.graphicType))
+
+ def decompile(self, ttFont):
+ self.glyphName = ttFont.getGlyphName(self.gid)
+ if self.rawdata is None:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("No table data to decompile")
+ if len(self.rawdata) > 0:
+ if len(self.rawdata) < sbixGlyphHeaderFormatSize:
+ from fontTools import ttLib
+
+ # print "Glyph %i header too short: Expected %x, got %x." % (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata))
+ raise ttLib.TTLibError("Glyph header too short.")
+
+ sstruct.unpack(
+ sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self
+ )
+
+ if self.graphicType == "dupe":
+ # this glyph is a reference to another glyph's image data
+ (gid,) = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:])
+ self.referenceGlyphName = ttFont.getGlyphName(gid)
+ else:
+ self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:]
+ self.referenceGlyphName = None
+ # clean up
+ del self.rawdata
+ del self.gid
+
+ def compile(self, ttFont):
+ if self.glyphName is None:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("Can't compile Glyph without glyph name")
+ # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index?
+ # (needed if you just want to compile the sbix table on its own)
+ self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName))
+ if self.graphicType is None:
+ rawdata = b""
+ else:
+ rawdata = sstruct.pack(sbixGlyphHeaderFormat, self)
+ if self.graphicType == "dupe":
+ rawdata += struct.pack(">H", ttFont.getGlyphID(self.referenceGlyphName))
+ else:
+ assert self.imageData is not None
+ rawdata += self.imageData
+ self.rawdata = rawdata
+
+ def toXML(self, xmlWriter, ttFont):
+ if self.graphicType is None:
+ # TODO: ignore empty glyphs?
+ # a glyph data entry is required for each glyph,
+ # but empty ones can be calculated at compile time
+ xmlWriter.simpletag("glyph", name=self.glyphName)
+ xmlWriter.newline()
+ return
+ xmlWriter.begintag(
+ "glyph",
+ graphicType=self.graphicType,
+ name=self.glyphName,
+ originOffsetX=self.originOffsetX,
+ originOffsetY=self.originOffsetY,
+ )
+ xmlWriter.newline()
+ if self.graphicType == "dupe":
+ # graphicType == "dupe" is a reference to another glyph id.
+ xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
+ else:
+ xmlWriter.begintag("hexdata")
+ xmlWriter.newline()
+ xmlWriter.dumphex(self.imageData)
+ xmlWriter.endtag("hexdata")
+ xmlWriter.newline()
+ xmlWriter.endtag("glyph")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "ref":
+ # glyph is a "dupe", i.e. a reference to another glyph's image data.
+ # in this case imageData contains the glyph id of the reference glyph
+ # get glyph id from glyphname
+ glyphname = safeEval("'''" + attrs["glyphname"] + "'''")
+ self.imageData = struct.pack(">H", ttFont.getGlyphID(glyphname))
+ self.referenceGlyphName = glyphname
+ elif name == "hexdata":
+ self.imageData = readHex(content)
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
diff --git a/Lib/fontTools/ttLib/tables/sbixStrike.py b/Lib/fontTools/ttLib/tables/sbixStrike.py
index b367a99f..7614af4c 100644
--- a/Lib/fontTools/ttLib/tables/sbixStrike.py
+++ b/Lib/fontTools/ttLib/tables/sbixStrike.py
@@ -22,127 +22,156 @@ sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat)
class Strike(object):
- def __init__(self, rawdata=None, ppem=0, resolution=72):
- self.data = rawdata
- self.ppem = ppem
- self.resolution = resolution
- self.glyphs = {}
-
- def decompile(self, ttFont):
- if self.data is None:
- from fontTools import ttLib
- raise ttLib.TTLibError
- if len(self.data) < sbixStrikeHeaderFormatSize:
- from fontTools import ttLib
- raise(ttLib.TTLibError, "Strike header too short: Expected %x, got %x.") \
- % (sbixStrikeHeaderFormatSize, len(self.data))
-
- # read Strike header from raw data
- sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self)
-
- # calculate number of glyphs
- firstGlyphDataOffset, = struct.unpack(">L", \
- self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize])
- self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1
- # ^ -1 because there's one more offset than glyphs
-
- # build offset list for single glyph data offsets
- self.glyphDataOffsets = []
- for i in range(self.numGlyphs + 1): # + 1 because there's one more offset than glyphs
- start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize
- current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize])
- self.glyphDataOffsets.append(current_offset)
-
- # iterate through offset list and slice raw data into glyph data records
- for i in range(self.numGlyphs):
- current_glyph = Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i)
- current_glyph.decompile(ttFont)
- self.glyphs[current_glyph.glyphName] = current_glyph
- del self.glyphDataOffsets
- del self.numGlyphs
- del self.data
-
- def compile(self, ttFont):
- self.glyphDataOffsets = b""
- self.bitmapData = b""
-
- glyphOrder = ttFont.getGlyphOrder()
-
- # first glyph starts right after the header
- currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1)
- for glyphName in glyphOrder:
- if glyphName in self.glyphs:
- # we have glyph data for this glyph
- current_glyph = self.glyphs[glyphName]
- else:
- # must add empty glyph data record for this glyph
- current_glyph = Glyph(glyphName=glyphName)
- current_glyph.compile(ttFont)
- current_glyph.glyphDataOffset = currentGlyphDataOffset
- self.bitmapData += current_glyph.rawdata
- currentGlyphDataOffset += len(current_glyph.rawdata)
- self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph)
-
- # add last "offset", really the end address of the last glyph data record
- dummy = Glyph()
- dummy.glyphDataOffset = currentGlyphDataOffset
- self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy)
-
- # pack header
- self.data = sstruct.pack(sbixStrikeHeaderFormat, self)
- # add offsets and image data after header
- self.data += self.glyphDataOffsets + self.bitmapData
-
- def toXML(self, xmlWriter, ttFont):
- xmlWriter.begintag("strike")
- xmlWriter.newline()
- xmlWriter.simpletag("ppem", value=self.ppem)
- xmlWriter.newline()
- xmlWriter.simpletag("resolution", value=self.resolution)
- xmlWriter.newline()
- glyphOrder = ttFont.getGlyphOrder()
- for i in range(len(glyphOrder)):
- if glyphOrder[i] in self.glyphs:
- self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont)
- # TODO: what if there are more glyph data records than (glyf table) glyphs?
- xmlWriter.endtag("strike")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name in ["ppem", "resolution"]:
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "glyph":
- if "graphicType" in attrs:
- myFormat = safeEval("'''" + attrs["graphicType"] + "'''")
- else:
- myFormat = None
- if "glyphname" in attrs:
- myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''")
- elif "name" in attrs:
- myGlyphName = safeEval("'''" + attrs["name"] + "'''")
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("Glyph must have a glyph name.")
- if "originOffsetX" in attrs:
- myOffsetX = safeEval(attrs["originOffsetX"])
- else:
- myOffsetX = 0
- if "originOffsetY" in attrs:
- myOffsetY = safeEval(attrs["originOffsetY"])
- else:
- myOffsetY = 0
- current_glyph = Glyph(
- glyphName=myGlyphName,
- graphicType=myFormat,
- originOffsetX=myOffsetX,
- originOffsetY=myOffsetY,
- )
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- current_glyph.fromXML(name, attrs, content, ttFont)
- current_glyph.compile(ttFont)
- self.glyphs[current_glyph.glyphName] = current_glyph
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("can't handle '%s' element" % name)
+ def __init__(self, rawdata=None, ppem=0, resolution=72):
+ self.data = rawdata
+ self.ppem = ppem
+ self.resolution = resolution
+ self.glyphs = {}
+
+ def decompile(self, ttFont):
+ if self.data is None:
+ from fontTools import ttLib
+
+            raise ttLib.TTLibError("No strike data to decompile")
+ if len(self.data) < sbixStrikeHeaderFormatSize:
+ from fontTools import ttLib
+
+            raise ttLib.TTLibError(
+                "Strike header too short: Expected %x, got %x."
+                % (sbixStrikeHeaderFormatSize, len(self.data))
+            )
+
+ # read Strike header from raw data
+ sstruct.unpack(
+ sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self
+ )
+
+ # calculate number of glyphs
+ (firstGlyphDataOffset,) = struct.unpack(
+ ">L",
+ self.data[
+ sbixStrikeHeaderFormatSize : sbixStrikeHeaderFormatSize
+ + sbixGlyphDataOffsetFormatSize
+ ],
+ )
+ self.numGlyphs = (
+ firstGlyphDataOffset - sbixStrikeHeaderFormatSize
+ ) // sbixGlyphDataOffsetFormatSize - 1
+ # ^ -1 because there's one more offset than glyphs
+
+ # build offset list for single glyph data offsets
+ self.glyphDataOffsets = []
+ for i in range(
+ self.numGlyphs + 1
+ ): # + 1 because there's one more offset than glyphs
+ start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize
+ (current_offset,) = struct.unpack(
+ ">L", self.data[start : start + sbixGlyphDataOffsetFormatSize]
+ )
+ self.glyphDataOffsets.append(current_offset)
+
+ # iterate through offset list and slice raw data into glyph data records
+ for i in range(self.numGlyphs):
+ current_glyph = Glyph(
+ rawdata=self.data[
+ self.glyphDataOffsets[i] : self.glyphDataOffsets[i + 1]
+ ],
+ gid=i,
+ )
+ current_glyph.decompile(ttFont)
+ self.glyphs[current_glyph.glyphName] = current_glyph
+ del self.glyphDataOffsets
+ del self.numGlyphs
+ del self.data
+
+ def compile(self, ttFont):
+ self.glyphDataOffsets = b""
+ self.bitmapData = b""
+
+ glyphOrder = ttFont.getGlyphOrder()
+
+ # first glyph starts right after the header
+ currentGlyphDataOffset = (
+ sbixStrikeHeaderFormatSize
+ + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1)
+ )
+ for glyphName in glyphOrder:
+ if glyphName in self.glyphs:
+ # we have glyph data for this glyph
+ current_glyph = self.glyphs[glyphName]
+ else:
+ # must add empty glyph data record for this glyph
+ current_glyph = Glyph(glyphName=glyphName)
+ current_glyph.compile(ttFont)
+ current_glyph.glyphDataOffset = currentGlyphDataOffset
+ self.bitmapData += current_glyph.rawdata
+ currentGlyphDataOffset += len(current_glyph.rawdata)
+ self.glyphDataOffsets += sstruct.pack(
+ sbixGlyphDataOffsetFormat, current_glyph
+ )
+
+ # add last "offset", really the end address of the last glyph data record
+ dummy = Glyph()
+ dummy.glyphDataOffset = currentGlyphDataOffset
+ self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy)
+
+ # pack header
+ self.data = sstruct.pack(sbixStrikeHeaderFormat, self)
+ # add offsets and image data after header
+ self.data += self.glyphDataOffsets + self.bitmapData
+
+ def toXML(self, xmlWriter, ttFont):
+ xmlWriter.begintag("strike")
+ xmlWriter.newline()
+ xmlWriter.simpletag("ppem", value=self.ppem)
+ xmlWriter.newline()
+ xmlWriter.simpletag("resolution", value=self.resolution)
+ xmlWriter.newline()
+ glyphOrder = ttFont.getGlyphOrder()
+ for i in range(len(glyphOrder)):
+ if glyphOrder[i] in self.glyphs:
+ self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont)
+ # TODO: what if there are more glyph data records than (glyf table) glyphs?
+ xmlWriter.endtag("strike")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name in ["ppem", "resolution"]:
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "glyph":
+ if "graphicType" in attrs:
+ myFormat = safeEval("'''" + attrs["graphicType"] + "'''")
+ else:
+ myFormat = None
+ if "glyphname" in attrs:
+ myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''")
+ elif "name" in attrs:
+ myGlyphName = safeEval("'''" + attrs["name"] + "'''")
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("Glyph must have a glyph name.")
+ if "originOffsetX" in attrs:
+ myOffsetX = safeEval(attrs["originOffsetX"])
+ else:
+ myOffsetX = 0
+ if "originOffsetY" in attrs:
+ myOffsetY = safeEval(attrs["originOffsetY"])
+ else:
+ myOffsetY = 0
+ current_glyph = Glyph(
+ glyphName=myGlyphName,
+ graphicType=myFormat,
+ originOffsetX=myOffsetX,
+ originOffsetY=myOffsetY,
+ )
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ current_glyph.fromXML(name, attrs, content, ttFont)
+ current_glyph.compile(ttFont)
+ self.glyphs[current_glyph.glyphName] = current_glyph
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
diff --git a/Lib/fontTools/ttLib/tables/ttProgram.py b/Lib/fontTools/ttLib/tables/ttProgram.py
index 72377583..84aa63f3 100644
--- a/Lib/fontTools/ttLib/tables/ttProgram.py
+++ b/Lib/fontTools/ttLib/tables/ttProgram.py
@@ -1,187 +1,197 @@
"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs."""
+from __future__ import annotations
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
import array
from io import StringIO
+from typing import List
import re
import logging
log = logging.getLogger(__name__)
+# fmt: off
+
# first, the list of instructions that eat bytes or words from the instruction stream
streamInstructions = [
#
-# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
+# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
#
- (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn
- (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn
- (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn
- (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn
+ (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn
+ (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn
+ (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn
+ (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn
]
-# next, the list of "normal" instructions
+# next, the list of "normal" instructions
instructions = [
#
-#, opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
+# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
#
- (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p -
- (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n|
- (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2)
- (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 -
- (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue -
- (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b
- (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f -
- (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n)
- (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek
- (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack -
- (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n -
- (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
- (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
- (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
- (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
- (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
- (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
- (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n
- (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2
- (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e
- (0x59, 'EIF', 0, 'EndIf', 0, 0), # - -
- (0x1b, 'ELSE', 0, 'Else', 0, 0), # - -
- (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - -
- (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b
- (0x57, 'EVEN', 0, 'Even', 1, 1), # e b
- (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f -
- (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - -
- (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - -
- (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue -
- (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l -
- (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l -
- (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n)
- (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c
- (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result
- (0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an
- (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py
- (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py
- (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b
- (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b
- (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f -
- (0x58, 'IF', 0, 'If', 1, 0), # e -
- (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v -
- (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue -
- (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p -
- (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - -
- (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset -
- (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset -
- (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset -
- (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count -
- (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b
- (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b
- (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2)
- (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d
- (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p -
- (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p -
- (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p -
- (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2)
- (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek
- (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p -
- (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem
- (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize
- (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p -
- (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64
- (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n
- (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b
- (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e )
- (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2
- (0x56, 'ODD', 0, 'Odd', 1, 1), # e b
- (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b
- (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e -
- (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value
- (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - -
- (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - -
- (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c
- (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2
- (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v
- (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - -
- (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - -
- (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - -
- (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - -
- (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n -
- (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight -
- (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n -
- (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n -
- (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p -
- (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n -
- (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n -
- (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 -
- (0x5f, 'SDS', 0, 'SetDeltaShiftInGState',1, 0), # n -
- (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x -
- (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - -
- (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 -
- (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - -
- (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c -
- (0x32, 'SHP', 1, 'ShiftPointByLastPoint',-1, 0), # p1, p2, ..., ploopvalue -
- (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue -
- (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e -
- (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n -
- (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance -
- (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x -
- (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - -
- (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 -
- (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n -
- (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p -
- (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p -
- (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p -
- (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n -
- (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n -
- (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2)
- (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - -
- (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2
- (0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n -
- (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n -
- (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n -
- (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n -
- (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p -
- (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l -
- (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l -
- (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l -
+ (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p -
+ (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n|
+ (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2)
+ (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 -
+ (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue -
+ (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b
+ (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f -
+ (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n)
+ (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek
+ (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack -
+ (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n -
+ (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
+ (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
+ (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
+ (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
+ (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
+ (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
+ (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n
+ (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2
+ (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e
+ (0x59, 'EIF', 0, 'EndIf', 0, 0), # - -
+ (0x1b, 'ELSE', 0, 'Else', 0, 0), # - -
+ (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - -
+ (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b
+ (0x57, 'EVEN', 0, 'Even', 1, 1), # e b
+ (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f -
+ (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - -
+ (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - -
+ (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue -
+ (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l -
+ (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l -
+ (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n)
+ (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c
+ (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result
+ (0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an
+ (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py
+ (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py
+ (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b
+ (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b
+ (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f -
+ (0x58, 'IF', 0, 'If', 1, 0), # e -
+ (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v -
+ (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue -
+ (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p -
+ (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - -
+ (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset -
+ (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset -
+ (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset -
+ (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count -
+ (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b
+	(0x51, 'LTEQ',       0, 'LessThanOrEqual',      2, 1),  # e2, e1            b
+ (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2)
+ (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d
+ (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p -
+ (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p -
+ (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p -
+ (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2)
+ (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek
+ (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p -
+ (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem
+ (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize
+ (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p -
+ (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64
+ (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n
+ (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b
+ (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e )
+ (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2
+ (0x56, 'ODD', 0, 'Odd', 1, 1), # e b
+ (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b
+ (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e -
+ (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value
+ (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - -
+ (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - -
+ (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c
+ (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2
+ (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v
+ (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - -
+ (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - -
+ (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - -
+ (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - -
+ (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n -
+ (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight -
+ (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n -
+ (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n -
+ (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p -
+ (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n -
+ (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n -
+ (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 -
+ (0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n -
+ (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x -
+ (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - -
+ (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 -
+ (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - -
+ (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c -
+ (0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue -
+ (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue -
+ (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e -
+ (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n -
+ (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance -
+ (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x -
+ (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - -
+ (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 -
+ (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n -
+ (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p -
+ (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p -
+ (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p -
+ (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n -
+ (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n -
+ (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2)
+ (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - -
+ (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2
+ (0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n -
+ (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n -
+ (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n -
+ (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n -
+ (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p -
+ (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l -
+ (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l -
+ (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l -
]
+# fmt: on
+
def bitRepr(value, bits):
- s = ""
- for i in range(bits):
- s = "01"[value & 0x1] + s
- value = value >> 1
- return s
+ s = ""
+ for i in range(bits):
+ s = "01"[value & 0x1] + s
+ value = value >> 1
+ return s
_mnemonicPat = re.compile(r"[A-Z][A-Z0-9]*$")
+
def _makeDict(instructionList):
- opcodeDict = {}
- mnemonicDict = {}
- for op, mnemonic, argBits, name, pops, pushes in instructionList:
- assert _mnemonicPat.match(mnemonic)
- mnemonicDict[mnemonic] = op, argBits, name
- if argBits:
- argoffset = op
- for i in range(1 << argBits):
- opcodeDict[op+i] = mnemonic, argBits, argoffset, name
- else:
- opcodeDict[op] = mnemonic, 0, 0, name
- return opcodeDict, mnemonicDict
+ opcodeDict = {}
+ mnemonicDict = {}
+ for op, mnemonic, argBits, name, pops, pushes in instructionList:
+ assert _mnemonicPat.match(mnemonic)
+ mnemonicDict[mnemonic] = op, argBits, name
+ if argBits:
+ argoffset = op
+ for i in range(1 << argBits):
+ opcodeDict[op + i] = mnemonic, argBits, argoffset, name
+ else:
+ opcodeDict[op] = mnemonic, 0, 0, name
+ return opcodeDict, mnemonicDict
+
streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions)
opcodeDict, mnemonicDict = _makeDict(instructions)
+
class tt_instructions_error(Exception):
- def __init__(self, error):
- self.error = error
- def __str__(self):
- return "TT instructions error: %s" % repr(self.error)
+ def __init__(self, error):
+ self.error = error
+
+ def __str__(self):
+ return "TT instructions error: %s" % repr(self.error)
_comment = r"/\*.*?\*/"
@@ -197,348 +207,387 @@ _pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/")
_indentRE = re.compile(r"^FDEF|IF|ELSE\[ \]\t.+")
_unindentRE = re.compile(r"^ELSE|ENDF|EIF\[ \]\t.+")
+
def _skipWhite(data, pos):
- m = _whiteRE.match(data, pos)
- newPos = m.regs[0][1]
- assert newPos >= pos
- return newPos
+ m = _whiteRE.match(data, pos)
+ newPos = m.regs[0][1]
+ assert newPos >= pos
+ return newPos
class Program(object):
-
- def __init__(self):
- pass
-
- def fromBytecode(self, bytecode):
- self.bytecode = array.array("B", bytecode)
- if hasattr(self, "assembly"):
- del self.assembly
-
- def fromAssembly(self, assembly):
- self.assembly = assembly
- if hasattr(self, "bytecode"):
- del self.bytecode
-
- def getBytecode(self):
- if not hasattr(self, "bytecode"):
- self._assemble()
- return self.bytecode.tobytes()
-
- def getAssembly(self, preserve=True):
- if not hasattr(self, "assembly"):
- self._disassemble(preserve=preserve)
- return self.assembly
-
- def toXML(self, writer, ttFont):
- if not hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions:
- try:
- assembly = self.getAssembly()
- except:
- import traceback
- tmp = StringIO()
- traceback.print_exc(file=tmp)
- msg = "An exception occurred during the decompilation of glyph program:\n\n"
- msg += tmp.getvalue()
- log.error(msg)
- writer.begintag("bytecode")
- writer.newline()
- writer.comment(msg.strip())
- writer.newline()
- writer.dumphex(self.getBytecode())
- writer.endtag("bytecode")
- writer.newline()
- else:
- if not assembly:
- return
- writer.begintag("assembly")
- writer.newline()
- i = 0
- indent = 0
- nInstr = len(assembly)
- while i < nInstr:
- instr = assembly[i]
- if _unindentRE.match(instr):
- indent -= 1
- writer.write(writer.indentwhite * indent)
- writer.write(instr)
- writer.newline()
- m = _pushCountPat.match(instr)
- i = i + 1
- if m:
- nValues = int(m.group(1))
- line = []
- j = 0
- for j in range(nValues):
- if j and not (j % 25):
- writer.write(writer.indentwhite * indent)
- writer.write(' '.join(line))
- writer.newline()
- line = []
- line.append(assembly[i+j])
- writer.write(writer.indentwhite * indent)
- writer.write(' '.join(line))
- writer.newline()
- i = i + j + 1
- if _indentRE.match(instr):
- indent += 1
- writer.endtag("assembly")
- writer.newline()
- else:
- bytecode = self.getBytecode()
- if not bytecode:
- return
- writer.begintag("bytecode")
- writer.newline()
- writer.dumphex(bytecode)
- writer.endtag("bytecode")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "assembly":
- self.fromAssembly(strjoin(content))
- self._assemble()
- del self.assembly
- else:
- assert name == "bytecode"
- self.fromBytecode(readHex(content))
-
- def _assemble(self):
- assembly = getattr(self, 'assembly', [])
- if isinstance(assembly, type([])):
- assembly = ' '.join(assembly)
- bytecode = []
- push = bytecode.append
- lenAssembly = len(assembly)
- pos = _skipWhite(assembly, 0)
- while pos < lenAssembly:
- m = _tokenRE.match(assembly, pos)
- if m is None:
- raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos-5:pos+15])
- dummy, mnemonic, arg, number, comment = m.groups()
- pos = m.regs[0][1]
- if comment:
- pos = _skipWhite(assembly, pos)
- continue
-
- arg = arg.strip()
- if mnemonic.startswith("INSTR"):
- # Unknown instruction
- op = int(mnemonic[5:])
- push(op)
- elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
- op, argBits, name = mnemonicDict[mnemonic]
- if len(arg) != argBits:
- raise tt_instructions_error("Incorrect number of argument bits (%s[%s])" % (mnemonic, arg))
- if arg:
- arg = binary2num(arg)
- push(op + arg)
- else:
- push(op)
- else:
- args = []
- pos = _skipWhite(assembly, pos)
- while pos < lenAssembly:
- m = _tokenRE.match(assembly, pos)
- if m is None:
- raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos:pos+15])
- dummy, _mnemonic, arg, number, comment = m.groups()
- if number is None and comment is None:
- break
- pos = m.regs[0][1]
- pos = _skipWhite(assembly, pos)
- if comment is not None:
- continue
- args.append(int(number))
- nArgs = len(args)
- if mnemonic == "PUSH":
- # Automatically choose the most compact representation
- nWords = 0
- while nArgs:
- while nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255):
- nWords += 1
- nBytes = 0
- while nWords+nBytes < nArgs and nBytes < 255 and 0 <= args[nWords+nBytes] <= 255:
- nBytes += 1
- if nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs:
- # Will write bytes as words
- nWords += nBytes
- continue
-
- # Write words
- if nWords:
- if nWords <= 8:
- op, argBits, name = streamMnemonicDict["PUSHW"]
- op = op + nWords - 1
- push(op)
- else:
- op, argBits, name = streamMnemonicDict["NPUSHW"]
- push(op)
- push(nWords)
- for value in args[:nWords]:
- assert -32768 <= value < 32768, "PUSH value out of range %d" % value
- push((value >> 8) & 0xff)
- push(value & 0xff)
-
- # Write bytes
- if nBytes:
- pass
- if nBytes <= 8:
- op, argBits, name = streamMnemonicDict["PUSHB"]
- op = op + nBytes - 1
- push(op)
- else:
- op, argBits, name = streamMnemonicDict["NPUSHB"]
- push(op)
- push(nBytes)
- for value in args[nWords:nWords+nBytes]:
- push(value)
-
- nTotal = nWords + nBytes
- args = args[nTotal:]
- nArgs -= nTotal
- nWords = 0
- else:
- # Write exactly what we've been asked to
- words = mnemonic[-1] == "W"
- op, argBits, name = streamMnemonicDict[mnemonic]
- if mnemonic[0] != "N":
- assert nArgs <= 8, nArgs
- op = op + nArgs - 1
- push(op)
- else:
- assert nArgs < 256
- push(op)
- push(nArgs)
- if words:
- for value in args:
- assert -32768 <= value < 32768, "PUSHW value out of range %d" % value
- push((value >> 8) & 0xff)
- push(value & 0xff)
- else:
- for value in args:
- assert 0 <= value < 256, "PUSHB value out of range %d" % value
- push(value)
-
- pos = _skipWhite(assembly, pos)
-
- if bytecode:
- assert max(bytecode) < 256 and min(bytecode) >= 0
- self.bytecode = array.array("B", bytecode)
-
- def _disassemble(self, preserve=False):
- assembly = []
- i = 0
- bytecode = getattr(self, 'bytecode', [])
- numBytecode = len(bytecode)
- while i < numBytecode:
- op = bytecode[i]
- try:
- mnemonic, argBits, argoffset, name = opcodeDict[op]
- except KeyError:
- if op in streamOpcodeDict:
- values = []
-
- # Merge consecutive PUSH operations
- while bytecode[i] in streamOpcodeDict:
- op = bytecode[i]
- mnemonic, argBits, argoffset, name = streamOpcodeDict[op]
- words = mnemonic[-1] == "W"
- if argBits:
- nValues = op - argoffset + 1
- else:
- i = i + 1
- nValues = bytecode[i]
- i = i + 1
- assert nValues > 0
- if not words:
- for j in range(nValues):
- value = bytecode[i]
- values.append(repr(value))
- i = i + 1
- else:
- for j in range(nValues):
- # cast to signed int16
- value = (bytecode[i] << 8) | bytecode[i+1]
- if value >= 0x8000:
- value = value - 0x10000
- values.append(repr(value))
- i = i + 2
- if preserve:
- break
-
- if not preserve:
- mnemonic = "PUSH"
- nValues = len(values)
- if nValues == 1:
- assembly.append("%s[ ] /* 1 value pushed */" % mnemonic)
- else:
- assembly.append("%s[ ] /* %s values pushed */" % (mnemonic, nValues))
- assembly.extend(values)
- else:
- assembly.append("INSTR%d[ ]" % op)
- i = i + 1
- else:
- if argBits:
- assembly.append(mnemonic + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name))
- else:
- assembly.append(mnemonic + "[ ] /* %s */" % name)
- i = i + 1
- self.assembly = assembly
-
- def __bool__(self):
- """
- >>> p = Program()
- >>> bool(p)
- False
- >>> bc = array.array("B", [0])
- >>> p.fromBytecode(bc)
- >>> bool(p)
- True
- >>> p.bytecode.pop()
- 0
- >>> bool(p)
- False
-
- >>> p = Program()
- >>> asm = ['SVTCA[0]']
- >>> p.fromAssembly(asm)
- >>> bool(p)
- True
- >>> p.assembly.pop()
- 'SVTCA[0]'
- >>> bool(p)
- False
- """
- return ((hasattr(self, 'assembly') and len(self.assembly) > 0) or
- (hasattr(self, 'bytecode') and len(self.bytecode) > 0))
-
- __nonzero__ = __bool__
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+ def __init__(self) -> None:
+ pass
+
+ def fromBytecode(self, bytecode: bytes) -> None:
+ self.bytecode = array.array("B", bytecode)
+ if hasattr(self, "assembly"):
+ del self.assembly
+
+ def fromAssembly(self, assembly: List[str] | str) -> None:
+ if isinstance(assembly, list):
+ self.assembly = assembly
+ elif isinstance(assembly, str):
+ self.assembly = assembly.splitlines()
+ else:
+ raise TypeError(f"expected str or List[str], got {type(assembly).__name__}")
+ if hasattr(self, "bytecode"):
+ del self.bytecode
+
+ def getBytecode(self) -> bytes:
+ if not hasattr(self, "bytecode"):
+ self._assemble()
+ return self.bytecode.tobytes()
+
+ def getAssembly(self, preserve=True) -> List[str]:
+ if not hasattr(self, "assembly"):
+ self._disassemble(preserve=preserve)
+ return self.assembly
+
+ def toXML(self, writer, ttFont) -> None:
+ if (
+ not hasattr(ttFont, "disassembleInstructions")
+ or ttFont.disassembleInstructions
+ ):
+ try:
+ assembly = self.getAssembly()
+ except:
+ import traceback
+
+ tmp = StringIO()
+ traceback.print_exc(file=tmp)
+ msg = "An exception occurred during the decompilation of glyph program:\n\n"
+ msg += tmp.getvalue()
+ log.error(msg)
+ writer.begintag("bytecode")
+ writer.newline()
+ writer.comment(msg.strip())
+ writer.newline()
+ writer.dumphex(self.getBytecode())
+ writer.endtag("bytecode")
+ writer.newline()
+ else:
+ if not assembly:
+ return
+ writer.begintag("assembly")
+ writer.newline()
+ i = 0
+ indent = 0
+ nInstr = len(assembly)
+ while i < nInstr:
+ instr = assembly[i]
+ if _unindentRE.match(instr):
+ indent -= 1
+ writer.write(writer.indentwhite * indent)
+ writer.write(instr)
+ writer.newline()
+ m = _pushCountPat.match(instr)
+ i = i + 1
+ if m:
+ nValues = int(m.group(1))
+ line: List[str] = []
+ j = 0
+ for j in range(nValues):
+ if j and not (j % 25):
+ writer.write(writer.indentwhite * indent)
+ writer.write(" ".join(line))
+ writer.newline()
+ line = []
+ line.append(assembly[i + j])
+ writer.write(writer.indentwhite * indent)
+ writer.write(" ".join(line))
+ writer.newline()
+ i = i + j + 1
+ if _indentRE.match(instr):
+ indent += 1
+ writer.endtag("assembly")
+ writer.newline()
+ else:
+ bytecode = self.getBytecode()
+ if not bytecode:
+ return
+ writer.begintag("bytecode")
+ writer.newline()
+ writer.dumphex(bytecode)
+ writer.endtag("bytecode")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont) -> None:
+ if name == "assembly":
+ self.fromAssembly(strjoin(content))
+ self._assemble()
+ del self.assembly
+ else:
+ assert name == "bytecode"
+ self.fromBytecode(readHex(content))
+
+ def _assemble(self) -> None:
+ assembly = " ".join(getattr(self, "assembly", []))
+ bytecode: List[int] = []
+ push = bytecode.append
+ lenAssembly = len(assembly)
+ pos = _skipWhite(assembly, 0)
+ while pos < lenAssembly:
+ m = _tokenRE.match(assembly, pos)
+ if m is None:
+ raise tt_instructions_error(
+ "Syntax error in TT program (%s)" % assembly[pos - 5 : pos + 15]
+ )
+ dummy, mnemonic, arg, number, comment = m.groups()
+ pos = m.regs[0][1]
+ if comment:
+ pos = _skipWhite(assembly, pos)
+ continue
+
+ arg = arg.strip()
+ if mnemonic.startswith("INSTR"):
+ # Unknown instruction
+ op = int(mnemonic[5:])
+ push(op)
+ elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
+ op, argBits, name = mnemonicDict[mnemonic]
+ if len(arg) != argBits:
+ raise tt_instructions_error(
+ "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)
+ )
+ if arg:
+ arg = binary2num(arg)
+ push(op + arg)
+ else:
+ push(op)
+ else:
+ args = []
+ pos = _skipWhite(assembly, pos)
+ while pos < lenAssembly:
+ m = _tokenRE.match(assembly, pos)
+ if m is None:
+ raise tt_instructions_error(
+ "Syntax error in TT program (%s)" % assembly[pos : pos + 15]
+ )
+ dummy, _mnemonic, arg, number, comment = m.groups()
+ if number is None and comment is None:
+ break
+ pos = m.regs[0][1]
+ pos = _skipWhite(assembly, pos)
+ if comment is not None:
+ continue
+ args.append(int(number))
+ nArgs = len(args)
+ if mnemonic == "PUSH":
+ # Automatically choose the most compact representation
+ nWords = 0
+ while nArgs:
+ while (
+ nWords < nArgs
+ and nWords < 255
+ and not (0 <= args[nWords] <= 255)
+ ):
+ nWords += 1
+ nBytes = 0
+ while (
+ nWords + nBytes < nArgs
+ and nBytes < 255
+ and 0 <= args[nWords + nBytes] <= 255
+ ):
+ nBytes += 1
+ if (
+ nBytes < 2
+ and nWords + nBytes < 255
+ and nWords + nBytes != nArgs
+ ):
+ # Will write bytes as words
+ nWords += nBytes
+ continue
+
+ # Write words
+ if nWords:
+ if nWords <= 8:
+ op, argBits, name = streamMnemonicDict["PUSHW"]
+ op = op + nWords - 1
+ push(op)
+ else:
+ op, argBits, name = streamMnemonicDict["NPUSHW"]
+ push(op)
+ push(nWords)
+ for value in args[:nWords]:
+ assert -32768 <= value < 32768, (
+ "PUSH value out of range %d" % value
+ )
+ push((value >> 8) & 0xFF)
+ push(value & 0xFF)
+
+ # Write bytes
+ if nBytes:
+ pass
+ if nBytes <= 8:
+ op, argBits, name = streamMnemonicDict["PUSHB"]
+ op = op + nBytes - 1
+ push(op)
+ else:
+ op, argBits, name = streamMnemonicDict["NPUSHB"]
+ push(op)
+ push(nBytes)
+ for value in args[nWords : nWords + nBytes]:
+ push(value)
+
+ nTotal = nWords + nBytes
+ args = args[nTotal:]
+ nArgs -= nTotal
+ nWords = 0
+ else:
+ # Write exactly what we've been asked to
+ words = mnemonic[-1] == "W"
+ op, argBits, name = streamMnemonicDict[mnemonic]
+ if mnemonic[0] != "N":
+ assert nArgs <= 8, nArgs
+ op = op + nArgs - 1
+ push(op)
+ else:
+ assert nArgs < 256
+ push(op)
+ push(nArgs)
+ if words:
+ for value in args:
+ assert -32768 <= value < 32768, (
+ "PUSHW value out of range %d" % value
+ )
+ push((value >> 8) & 0xFF)
+ push(value & 0xFF)
+ else:
+ for value in args:
+ assert 0 <= value < 256, (
+ "PUSHB value out of range %d" % value
+ )
+ push(value)
+
+ pos = _skipWhite(assembly, pos)
+
+ if bytecode:
+ assert max(bytecode) < 256 and min(bytecode) >= 0
+ self.bytecode = array.array("B", bytecode)
+
+ def _disassemble(self, preserve=False) -> None:
+ assembly = []
+ i = 0
+ bytecode = getattr(self, "bytecode", [])
+ numBytecode = len(bytecode)
+ while i < numBytecode:
+ op = bytecode[i]
+ try:
+ mnemonic, argBits, argoffset, name = opcodeDict[op]
+ except KeyError:
+ if op in streamOpcodeDict:
+ values = []
+
+ # Merge consecutive PUSH operations
+ while bytecode[i] in streamOpcodeDict:
+ op = bytecode[i]
+ mnemonic, argBits, argoffset, name = streamOpcodeDict[op]
+ words = mnemonic[-1] == "W"
+ if argBits:
+ nValues = op - argoffset + 1
+ else:
+ i = i + 1
+ nValues = bytecode[i]
+ i = i + 1
+ assert nValues > 0
+ if not words:
+ for j in range(nValues):
+ value = bytecode[i]
+ values.append(repr(value))
+ i = i + 1
+ else:
+ for j in range(nValues):
+ # cast to signed int16
+ value = (bytecode[i] << 8) | bytecode[i + 1]
+ if value >= 0x8000:
+ value = value - 0x10000
+ values.append(repr(value))
+ i = i + 2
+ if preserve:
+ break
+
+ if not preserve:
+ mnemonic = "PUSH"
+ nValues = len(values)
+ if nValues == 1:
+ assembly.append("%s[ ] /* 1 value pushed */" % mnemonic)
+ else:
+ assembly.append(
+ "%s[ ] /* %s values pushed */" % (mnemonic, nValues)
+ )
+ assembly.extend(values)
+ else:
+ assembly.append("INSTR%d[ ]" % op)
+ i = i + 1
+ else:
+ if argBits:
+ assembly.append(
+ mnemonic
+ + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)
+ )
+ else:
+ assembly.append(mnemonic + "[ ] /* %s */" % name)
+ i = i + 1
+ self.assembly = assembly
+
+ def __bool__(self) -> bool:
+ """
+ >>> p = Program()
+ >>> bool(p)
+ False
+ >>> bc = array.array("B", [0])
+ >>> p.fromBytecode(bc)
+ >>> bool(p)
+ True
+ >>> p.bytecode.pop()
+ 0
+ >>> bool(p)
+ False
+
+ >>> p = Program()
+ >>> asm = ['SVTCA[0]']
+ >>> p.fromAssembly(asm)
+ >>> bool(p)
+ True
+ >>> p.assembly.pop()
+ 'SVTCA[0]'
+ >>> bool(p)
+ False
+ """
+ return (hasattr(self, "assembly") and len(self.assembly) > 0) or (
+ hasattr(self, "bytecode") and len(self.bytecode) > 0
+ )
+
+ __nonzero__ = __bool__
+
+ def __eq__(self, other) -> bool:
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other) -> bool:
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
def _test():
- """
- >>> _test()
- True
- """
+ """
+ >>> _test()
+ True
+ """
- bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! \212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 
9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-"""
+ bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! \212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 
9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-"""
+
+ p = Program()
+ p.fromBytecode(bc)
+ asm = p.getAssembly(preserve=True)
+ p.fromAssembly(asm)
+ print(bc == p.getBytecode())
- p = Program()
- p.fromBytecode(bc)
- asm = p.getAssembly(preserve=True)
- p.fromAssembly(asm)
- print(bc == p.getBytecode())
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
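
The hunk above reformats fontTools' TrueType bytecode assembler/disassembler: Program keeps either an `assembly` list or a `bytecode` array and converts between the two lazily via `_assemble()`/`_disassemble()`, which is exactly what `_test()` exercises with a real glyph program. As a quick illustration of that round trip, here is a minimal sketch (not part of the patch); it assumes the class lives at its usual location, fontTools.ttLib.tables.ttProgram, and uses made-up assembly rather than data from a real font:

    from fontTools.ttLib.tables.ttProgram import Program

    # Assemble mnemonics into bytecode.  "SVTCA[0]" is taken from the __bool__
    # doctest above; the PUSHB[ ] line pushes a single illustrative byte value.
    prog = Program()
    prog.fromAssembly(["PUSHB[ ]", "0", "SVTCA[0]"])
    code = prog.getBytecode()  # triggers _assemble(); returns bytes

    # Disassemble again; preserve=True keeps the PUSHB/PUSHW/NPUSH* opcodes
    # instead of folding them into generic PUSH[ ] pseudo-instructions.
    roundtrip = Program()
    roundtrip.fromBytecode(code)
    assert roundtrip.getBytecode() == code
    print(roundtrip.getAssembly(preserve=True))
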
diff --git a/Lib/fontTools/ttLib/ttCollection.py b/Lib/fontTools/ttLib/ttCollection.py
index f0922127..70ed4b7a 100644
--- a/Lib/fontTools/ttLib/ttCollection.py
+++ b/Lib/fontTools/ttLib/ttCollection.py
@@ -9,118 +9,118 @@ log = logging.getLogger(__name__)
class TTCollection(object):
- """Object representing a TrueType Collection / OpenType Collection.
- The main API is self.fonts being a list of TTFont instances.
-
- If shareTables is True, then different fonts in the collection
- might point to the same table object if the data for the table was
- the same in the font file. Note, however, that this might result
- in suprises and incorrect behavior if the different fonts involved
- have different GlyphOrder. Use only if you know what you are doing.
- """
-
- def __init__(self, file=None, shareTables=False, **kwargs):
- fonts = self.fonts = []
- if file is None:
- return
-
- assert 'fontNumber' not in kwargs, kwargs
-
- closeStream = False
- if not hasattr(file, "read"):
- file = open(file, "rb")
- closeStream = True
-
- tableCache = {} if shareTables else None
-
- header = readTTCHeader(file)
- for i in range(header.numFonts):
- font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs)
- fonts.append(font)
-
- # don't close file if lazy=True, as the TTFont hold a reference to the original
- # file; the file will be closed once the TTFonts are closed in the
- # TTCollection.close(). We still want to close the file if lazy is None or
- # False, because in that case the TTFont no longer need the original file
- # and we want to avoid 'ResourceWarning: unclosed file'.
- if not kwargs.get("lazy") and closeStream:
- file.close()
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- self.close()
-
- def close(self):
- for font in self.fonts:
- font.close()
-
- def save(self, file, shareTables=True):
- """Save the font to disk. Similarly to the constructor,
- the 'file' argument can be either a pathname or a writable
- file object.
- """
- if not hasattr(file, "write"):
- final = None
- file = open(file, "wb")
- else:
- # assume "file" is a writable file object
- # write to a temporary stream to allow saving to unseekable streams
- final = file
- file = BytesIO()
-
- tableCache = {} if shareTables else None
-
- offsets_offset = writeTTCHeader(file, len(self.fonts))
- offsets = []
- for font in self.fonts:
- offsets.append(file.tell())
- font._save(file, tableCache=tableCache)
- file.seek(0,2)
-
- file.seek(offsets_offset)
- file.write(struct.pack(">%dL" % len(self.fonts), *offsets))
-
- if final:
- final.write(file.getvalue())
- file.close()
-
- def saveXML(self, fileOrPath, newlinestr="\n", writeVersion=True, **kwargs):
-
- from fontTools.misc import xmlWriter
- writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
-
- if writeVersion:
- from fontTools import version
- version = ".".join(version.split('.')[:2])
- writer.begintag("ttCollection", ttLibVersion=version)
- else:
- writer.begintag("ttCollection")
- writer.newline()
- writer.newline()
-
- for font in self.fonts:
- font._saveXML(writer, writeVersion=False, **kwargs)
- writer.newline()
-
- writer.endtag("ttCollection")
- writer.newline()
-
- writer.close()
-
-
- def __getitem__(self, item):
- return self.fonts[item]
-
- def __setitem__(self, item, value):
- self.fonts[item] = value
-
- def __delitem__(self, item):
- return self.fonts[item]
+ """Object representing a TrueType Collection / OpenType Collection.
+ The main API is self.fonts being a list of TTFont instances.
+
+ If shareTables is True, then different fonts in the collection
+ might point to the same table object if the data for the table was
+ the same in the font file. Note, however, that this might result
+    in surprises and incorrect behavior if the different fonts involved
+ have different GlyphOrder. Use only if you know what you are doing.
+ """
+
+ def __init__(self, file=None, shareTables=False, **kwargs):
+ fonts = self.fonts = []
+ if file is None:
+ return
+
+ assert "fontNumber" not in kwargs, kwargs
+
+ closeStream = False
+ if not hasattr(file, "read"):
+ file = open(file, "rb")
+ closeStream = True
+
+ tableCache = {} if shareTables else None
+
+ header = readTTCHeader(file)
+ for i in range(header.numFonts):
+ font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs)
+ fonts.append(font)
+
+        # don't close the file if lazy=True, as the TTFonts hold a reference to the
+        # original file; the file will be closed once the TTFonts are closed in
+        # TTCollection.close(). We still want to close the file if lazy is None or
+        # False, because in that case the TTFonts no longer need the original file
+        # and we want to avoid 'ResourceWarning: unclosed file'.
+ if not kwargs.get("lazy") and closeStream:
+ file.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def close(self):
+ for font in self.fonts:
+ font.close()
+
+ def save(self, file, shareTables=True):
+ """Save the font to disk. Similarly to the constructor,
+ the 'file' argument can be either a pathname or a writable
+ file object.
+ """
+ if not hasattr(file, "write"):
+ final = None
+ file = open(file, "wb")
+ else:
+ # assume "file" is a writable file object
+ # write to a temporary stream to allow saving to unseekable streams
+ final = file
+ file = BytesIO()
+
+ tableCache = {} if shareTables else None
+
+ offsets_offset = writeTTCHeader(file, len(self.fonts))
+ offsets = []
+ for font in self.fonts:
+ offsets.append(file.tell())
+ font._save(file, tableCache=tableCache)
+ file.seek(0, 2)
+
+ file.seek(offsets_offset)
+ file.write(struct.pack(">%dL" % len(self.fonts), *offsets))
+
+ if final:
+ final.write(file.getvalue())
+ file.close()
+
+ def saveXML(self, fileOrPath, newlinestr="\n", writeVersion=True, **kwargs):
+ from fontTools.misc import xmlWriter
+
+ writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
+
+ if writeVersion:
+ from fontTools import version
+
+ version = ".".join(version.split(".")[:2])
+ writer.begintag("ttCollection", ttLibVersion=version)
+ else:
+ writer.begintag("ttCollection")
+ writer.newline()
+ writer.newline()
+
+ for font in self.fonts:
+ font._saveXML(writer, writeVersion=False, **kwargs)
+ writer.newline()
+
+ writer.endtag("ttCollection")
+ writer.newline()
+
+ writer.close()
+
+ def __getitem__(self, item):
+ return self.fonts[item]
+
+ def __setitem__(self, item, value):
+ self.fonts[item] = value
+
+ def __delitem__(self, item):
+        del self.fonts[item]
- def __len__(self):
- return len(self.fonts)
+ def __len__(self):
+ return len(self.fonts)
- def __iter__(self):
- return iter(self.fonts)
+ def __iter__(self):
+ return iter(self.fonts)
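
For context on the reformatted collection class above, a short usage sketch follows (illustration only, not part of the patch; the .ttc paths are hypothetical, and the 'maxp' lookup mirrors the TTFont docstring example below):

    from fontTools.ttLib.ttCollection import TTCollection

    # Open a TrueType Collection; shareTables=True lets member TTFonts share
    # identical table objects (see the docstring caveat about glyph order).
    with TTCollection("Fonts.ttc", shareTables=True) as collection:
        print(len(collection), "fonts in the collection")
        for font in collection:            # __iter__ walks collection.fonts
            print(font["maxp"].numGlyphs)  # each member is a regular TTFont
        # save() accepts a pathname or a writable file object, like TTFont.save()
        collection.save("Fonts-copy.ttc")
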
diff --git a/Lib/fontTools/ttLib/ttFont.py b/Lib/fontTools/ttLib/ttFont.py
index 327d113f..6a9ca098 100644
--- a/Lib/fontTools/ttLib/ttFont.py
+++ b/Lib/fontTools/ttLib/ttFont.py
@@ -4,793 +4,890 @@ from fontTools.misc.configTools import AbstractConfig
from fontTools.misc.textTools import Tag, byteord, tostr
from fontTools.misc.loggingTools import deprecateArgument
from fontTools.ttLib import TTLibError
-from fontTools.ttLib.ttGlyphSet import (
- _TTGlyphSet, _TTGlyph,
- _TTGlyphCFF, _TTGlyphGlyf,
- _TTVarGlyphSet,
-)
+from fontTools.ttLib.ttGlyphSet import _TTGlyph, _TTGlyphSetCFF, _TTGlyphSetGlyf
from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter
-from io import BytesIO, StringIO
+from io import BytesIO, StringIO, UnsupportedOperation
import os
import logging
import traceback
log = logging.getLogger(__name__)
+
class TTFont(object):
- """Represents a TrueType font.
-
- The object manages file input and output, and offers a convenient way of
- accessing tables. Tables will be only decompiled when necessary, ie. when
- they're actually accessed. This means that simple operations can be extremely fast.
-
- Example usage::
-
- >> from fontTools import ttLib
- >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file
- >> tt['maxp'].numGlyphs
- 242
- >> tt['OS/2'].achVendID
- 'B&H\000'
- >> tt['head'].unitsPerEm
- 2048
-
- For details of the objects returned when accessing each table, see :ref:`tables`.
- To add a table to the font, use the :py:func:`newTable` function::
-
- >> os2 = newTable("OS/2")
- >> os2.version = 4
- >> # set other attributes
- >> font["OS/2"] = os2
-
- TrueType fonts can also be serialized to and from XML format (see also the
- :ref:`ttx` binary)::
-
- >> tt.saveXML("afont.ttx")
- Dumping 'LTSH' table...
- Dumping 'OS/2' table...
- [...]
-
- >> tt2 = ttLib.TTFont() # Create a new font object
- >> tt2.importXML("afont.ttx")
- >> tt2['maxp'].numGlyphs
- 242
-
- The TTFont object may be used as a context manager; this will cause the file
- reader to be closed after the context ``with`` block is exited::
-
- with TTFont(filename) as f:
- # Do stuff
-
- Args:
- file: When reading a font from disk, either a pathname pointing to a file,
- or a readable file object.
- res_name_or_index: If running on a Macintosh, either a sfnt resource name or
- an sfnt resource index number. If the index number is zero, TTLib will
- autodetect whether the file is a flat file or a suitcase. (If it is a suitcase,
- only the first 'sfnt' resource will be read.)
- sfntVersion (str): When constructing a font object from scratch, sets the four-byte
- sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create
- an OpenType file, use ``OTTO``.
- flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2
- file.
- checkChecksums (int): How checksum data should be treated. Default is 0
- (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to
- raise an exception if any wrong checksums are found.
- recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``,
- ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save.
- Also compiles the glyphs on importing, which saves memory consumption and
- time.
- ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation
- will be ignored, and the binary data will be returned for those tables instead.
- recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in
- the ``head`` table on save.
- fontNumber (int): The index of the font in a TrueType Collection file.
- lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon
- access only. If it is set to False, many data structures are loaded immediately.
- The default is ``lazy=None`` which is somewhere in between.
- """
-
- def __init__(self, file=None, res_name_or_index=None,
- sfntVersion="\000\001\000\000", flavor=None, checkChecksums=0,
- verbose=None, recalcBBoxes=True, allowVID=NotImplemented, ignoreDecompileErrors=False,
- recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None,
- _tableCache=None, cfg={}):
- for name in ("verbose", "quiet"):
- val = locals().get(name)
- if val is not None:
- deprecateArgument(name, "configure logging instead")
- setattr(self, name, val)
-
- self.lazy = lazy
- self.recalcBBoxes = recalcBBoxes
- self.recalcTimestamp = recalcTimestamp
- self.tables = {}
- self.reader = None
- self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg)
- self.ignoreDecompileErrors = ignoreDecompileErrors
-
- if not file:
- self.sfntVersion = sfntVersion
- self.flavor = flavor
- self.flavorData = None
- return
- if not hasattr(file, "read"):
- closeStream = True
- # assume file is a string
- if res_name_or_index is not None:
- # see if it contains 'sfnt' resources in the resource or data fork
- from . import macUtils
- if res_name_or_index == 0:
- if macUtils.getSFNTResIndices(file):
- # get the first available sfnt font.
- file = macUtils.SFNTResourceReader(file, 1)
- else:
- file = open(file, "rb")
- else:
- file = macUtils.SFNTResourceReader(file, res_name_or_index)
- else:
- file = open(file, "rb")
- else:
- # assume "file" is a readable file object
- closeStream = False
- file.seek(0)
-
- if not self.lazy:
- # read input file in memory and wrap a stream around it to allow overwriting
- file.seek(0)
- tmp = BytesIO(file.read())
- if hasattr(file, 'name'):
- # save reference to input file name
- tmp.name = file.name
- if closeStream:
- file.close()
- file = tmp
- self._tableCache = _tableCache
- self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
- self.sfntVersion = self.reader.sfntVersion
- self.flavor = self.reader.flavor
- self.flavorData = self.reader.flavorData
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- self.close()
-
- def close(self):
- """If we still have a reader object, close it."""
- if self.reader is not None:
- self.reader.close()
-
- def save(self, file, reorderTables=True):
- """Save the font to disk.
-
- Args:
- file: Similarly to the constructor, can be either a pathname or a writable
- file object.
- reorderTables (Option[bool]): If true (the default), reorder the tables,
- sorting them by tag (recommended by the OpenType specification). If
- false, retain the original font order. If None, reorder by table
- dependency (fastest).
- """
- if not hasattr(file, "write"):
- if self.lazy and self.reader.file.name == file:
- raise TTLibError(
- "Can't overwrite TTFont when 'lazy' attribute is True")
- createStream = True
- else:
- # assume "file" is a writable file object
- createStream = False
-
- tmp = BytesIO()
-
- writer_reordersTables = self._save(tmp)
-
- if not (reorderTables is None or writer_reordersTables or
- (reorderTables is False and self.reader is None)):
- if reorderTables is False:
- # sort tables using the original font's order
- tableOrder = list(self.reader.keys())
- else:
- # use the recommended order from the OpenType specification
- tableOrder = None
- tmp.flush()
- tmp2 = BytesIO()
- reorderFontTables(tmp, tmp2, tableOrder)
- tmp.close()
- tmp = tmp2
-
- if createStream:
- # "file" is a path
- with open(file, "wb") as file:
- file.write(tmp.getvalue())
- else:
- file.write(tmp.getvalue())
-
- tmp.close()
-
- def _save(self, file, tableCache=None):
- """Internal function, to be shared by save() and TTCollection.save()"""
-
- if self.recalcTimestamp and 'head' in self:
- self['head'] # make sure 'head' is loaded so the recalculation is actually done
-
- tags = list(self.keys())
- if "GlyphOrder" in tags:
- tags.remove("GlyphOrder")
- numTables = len(tags)
- # write to a temporary stream to allow saving to unseekable streams
- writer = SFNTWriter(file, numTables, self.sfntVersion, self.flavor, self.flavorData)
-
- done = []
- for tag in tags:
- self._writeTable(tag, writer, done, tableCache)
-
- writer.close()
-
- return writer.reordersTables()
-
- def saveXML(self, fileOrPath, newlinestr="\n", **kwargs):
- """Export the font as TTX (an XML-based text file), or as a series of text
- files when splitTables is true. In the latter case, the 'fileOrPath'
- argument should be a path to a directory.
- The 'tables' argument must either be false (dump all tables) or a
- list of tables to dump. The 'skipTables' argument may be a list of tables
- to skip, but only when the 'tables' argument is false.
- """
-
- writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
- self._saveXML(writer, **kwargs)
- writer.close()
-
- def _saveXML(self, writer,
- writeVersion=True,
- quiet=None, tables=None, skipTables=None, splitTables=False,
- splitGlyphs=False, disassembleInstructions=True,
- bitmapGlyphDataFormat='raw'):
-
- if quiet is not None:
- deprecateArgument("quiet", "configure logging instead")
-
- self.disassembleInstructions = disassembleInstructions
- self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
- if not tables:
- tables = list(self.keys())
- if "GlyphOrder" not in tables:
- tables = ["GlyphOrder"] + tables
- if skipTables:
- for tag in skipTables:
- if tag in tables:
- tables.remove(tag)
- numTables = len(tables)
-
- if writeVersion:
- from fontTools import version
- version = ".".join(version.split('.')[:2])
- writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1],
- ttLibVersion=version)
- else:
- writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1])
- writer.newline()
-
- # always splitTables if splitGlyphs is enabled
- splitTables = splitTables or splitGlyphs
-
- if not splitTables:
- writer.newline()
- else:
- path, ext = os.path.splitext(writer.filename)
- fileNameTemplate = path + ".%s" + ext
-
- for i in range(numTables):
- tag = tables[i]
- if splitTables:
- tablePath = fileNameTemplate % tagToIdentifier(tag)
- tableWriter = xmlWriter.XMLWriter(tablePath,
- newlinestr=writer.newlinestr)
- tableWriter.begintag("ttFont", ttLibVersion=version)
- tableWriter.newline()
- tableWriter.newline()
- writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
- writer.newline()
- else:
- tableWriter = writer
- self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs)
- if splitTables:
- tableWriter.endtag("ttFont")
- tableWriter.newline()
- tableWriter.close()
- writer.endtag("ttFont")
- writer.newline()
-
- def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False):
- if quiet is not None:
- deprecateArgument("quiet", "configure logging instead")
- if tag in self:
- table = self[tag]
- report = "Dumping '%s' table..." % tag
- else:
- report = "No '%s' table found." % tag
- log.info(report)
- if tag not in self:
- return
- xmlTag = tagToXML(tag)
- attrs = dict()
- if hasattr(table, "ERROR"):
- attrs['ERROR'] = "decompilation error"
- from .tables.DefaultTable import DefaultTable
- if table.__class__ == DefaultTable:
- attrs['raw'] = True
- writer.begintag(xmlTag, **attrs)
- writer.newline()
- if tag == "glyf":
- table.toXML(writer, self, splitGlyphs=splitGlyphs)
- else:
- table.toXML(writer, self)
- writer.endtag(xmlTag)
- writer.newline()
- writer.newline()
-
- def importXML(self, fileOrPath, quiet=None):
- """Import a TTX file (an XML-based text format), so as to recreate
- a font object.
- """
- if quiet is not None:
- deprecateArgument("quiet", "configure logging instead")
-
- if "maxp" in self and "post" in self:
- # Make sure the glyph order is loaded, as it otherwise gets
- # lost if the XML doesn't contain the glyph order, yet does
- # contain the table which was originally used to extract the
- # glyph names from (ie. 'post', 'cmap' or 'CFF ').
- self.getGlyphOrder()
-
- from fontTools.misc import xmlReader
-
- reader = xmlReader.XMLReader(fileOrPath, self)
- reader.read()
-
- def isLoaded(self, tag):
- """Return true if the table identified by ``tag`` has been
- decompiled and loaded into memory."""
- return tag in self.tables
-
- def has_key(self, tag):
- """Test if the table identified by ``tag`` is present in the font.
-
- As well as this method, ``tag in font`` can also be used to determine the
- presence of the table."""
- if self.isLoaded(tag):
- return True
- elif self.reader and tag in self.reader:
- return True
- elif tag == "GlyphOrder":
- return True
- else:
- return False
-
- __contains__ = has_key
-
- def keys(self):
- """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table."""
- keys = list(self.tables.keys())
- if self.reader:
- for key in list(self.reader.keys()):
- if key not in keys:
- keys.append(key)
-
- if "GlyphOrder" in keys:
- keys.remove("GlyphOrder")
- keys = sortedTagList(keys)
- return ["GlyphOrder"] + keys
-
- def ensureDecompiled(self, recurse=None):
- """Decompile all the tables, even if a TTFont was opened in 'lazy' mode."""
- for tag in self.keys():
- table = self[tag]
- if recurse is None:
- recurse = self.lazy is not False
- if recurse and hasattr(table, "ensureDecompiled"):
- table.ensureDecompiled(recurse=recurse)
- self.lazy = False
-
- def __len__(self):
- return len(list(self.keys()))
-
- def __getitem__(self, tag):
- tag = Tag(tag)
- table = self.tables.get(tag)
- if table is None:
- if tag == "GlyphOrder":
- table = GlyphOrder(tag)
- self.tables[tag] = table
- elif self.reader is not None:
- table = self._readTable(tag)
- else:
- raise KeyError("'%s' table not found" % tag)
- return table
-
- def _readTable(self, tag):
- log.debug("Reading '%s' table from disk", tag)
- data = self.reader[tag]
- if self._tableCache is not None:
- table = self._tableCache.get((tag, data))
- if table is not None:
- return table
- tableClass = getTableClass(tag)
- table = tableClass(tag)
- self.tables[tag] = table
- log.debug("Decompiling '%s' table", tag)
- try:
- table.decompile(data, self)
- except Exception:
- if not self.ignoreDecompileErrors:
- raise
- # fall back to DefaultTable, retaining the binary table data
- log.exception(
- "An exception occurred during the decompilation of the '%s' table", tag)
- from .tables.DefaultTable import DefaultTable
- file = StringIO()
- traceback.print_exc(file=file)
- table = DefaultTable(tag)
- table.ERROR = file.getvalue()
- self.tables[tag] = table
- table.decompile(data, self)
- if self._tableCache is not None:
- self._tableCache[(tag, data)] = table
- return table
-
- def __setitem__(self, tag, table):
- self.tables[Tag(tag)] = table
-
- def __delitem__(self, tag):
- if tag not in self:
- raise KeyError("'%s' table not found" % tag)
- if tag in self.tables:
- del self.tables[tag]
- if self.reader and tag in self.reader:
- del self.reader[tag]
-
- def get(self, tag, default=None):
- """Returns the table if it exists or (optionally) a default if it doesn't."""
- try:
- return self[tag]
- except KeyError:
- return default
-
- def setGlyphOrder(self, glyphOrder):
- """Set the glyph order
-
- Args:
- glyphOrder ([str]): List of glyph names in order.
- """
- self.glyphOrder = glyphOrder
- if hasattr(self, '_reverseGlyphOrderDict'):
- del self._reverseGlyphOrderDict
- if self.isLoaded("glyf"):
- self["glyf"].setGlyphOrder(glyphOrder)
-
- def getGlyphOrder(self):
- """Returns a list of glyph names ordered by their position in the font."""
- try:
- return self.glyphOrder
- except AttributeError:
- pass
- if 'CFF ' in self:
- cff = self['CFF ']
- self.glyphOrder = cff.getGlyphOrder()
- elif 'post' in self:
- # TrueType font
- glyphOrder = self['post'].getGlyphOrder()
- if glyphOrder is None:
- #
- # No names found in the 'post' table.
- # Try to create glyph names from the unicode cmap (if available)
- # in combination with the Adobe Glyph List (AGL).
- #
- self._getGlyphNamesFromCmap()
- else:
- self.glyphOrder = glyphOrder
- else:
- self._getGlyphNamesFromCmap()
- return self.glyphOrder
-
- def _getGlyphNamesFromCmap(self):
- #
- # This is rather convoluted, but then again, it's an interesting problem:
- # - we need to use the unicode values found in the cmap table to
- # build glyph names (eg. because there is only a minimal post table,
- # or none at all).
- # - but the cmap parser also needs glyph names to work with...
- # So here's what we do:
- # - make up glyph names based on glyphID
- # - load a temporary cmap table based on those names
- # - extract the unicode values, build the "real" glyph names
- # - unload the temporary cmap table
- #
- if self.isLoaded("cmap"):
- # Bootstrapping: we're getting called by the cmap parser
- # itself. This means self.tables['cmap'] contains a partially
- # loaded cmap, making it impossible to get at a unicode
- # subtable here. We remove the partially loaded cmap and
- # restore it later.
- # This only happens if the cmap table is loaded before any
- # other table that does f.getGlyphOrder() or f.getGlyphName().
- cmapLoading = self.tables['cmap']
- del self.tables['cmap']
- else:
- cmapLoading = None
- # Make up glyph names based on glyphID, which will be used by the
- # temporary cmap and by the real cmap in case we don't find a unicode
- # cmap.
- numGlyphs = int(self['maxp'].numGlyphs)
- glyphOrder = [None] * numGlyphs
- glyphOrder[0] = ".notdef"
- for i in range(1, numGlyphs):
- glyphOrder[i] = "glyph%.5d" % i
- # Set the glyph order, so the cmap parser has something
- # to work with (so we don't get called recursively).
- self.glyphOrder = glyphOrder
-
- # Make up glyph names based on the reversed cmap table. Because some
- # glyphs (eg. ligatures or alternates) may not be reachable via cmap,
- # this naming table will usually not cover all glyphs in the font.
- # If the font has no Unicode cmap table, reversecmap will be empty.
- if 'cmap' in self:
- reversecmap = self['cmap'].buildReversed()
- else:
- reversecmap = {}
- useCount = {}
- for i in range(numGlyphs):
- tempName = glyphOrder[i]
- if tempName in reversecmap:
- # If a font maps both U+0041 LATIN CAPITAL LETTER A and
- # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph,
- # we prefer naming the glyph as "A".
- glyphName = self._makeGlyphName(min(reversecmap[tempName]))
- numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1
- if numUses > 1:
- glyphName = "%s.alt%d" % (glyphName, numUses - 1)
- glyphOrder[i] = glyphName
-
- if 'cmap' in self:
- # Delete the temporary cmap table from the cache, so it can
- # be parsed again with the right names.
- del self.tables['cmap']
- self.glyphOrder = glyphOrder
- if cmapLoading:
- # restore partially loaded cmap, so it can continue loading
- # using the proper names.
- self.tables['cmap'] = cmapLoading
-
- @staticmethod
- def _makeGlyphName(codepoint):
- from fontTools import agl # Adobe Glyph List
- if codepoint in agl.UV2AGL:
- return agl.UV2AGL[codepoint]
- elif codepoint <= 0xFFFF:
- return "uni%04X" % codepoint
- else:
- return "u%X" % codepoint
-
- def getGlyphNames(self):
- """Get a list of glyph names, sorted alphabetically."""
- glyphNames = sorted(self.getGlyphOrder())
- return glyphNames
-
- def getGlyphNames2(self):
- """Get a list of glyph names, sorted alphabetically,
- but not case sensitive.
- """
- from fontTools.misc import textTools
- return textTools.caselessSort(self.getGlyphOrder())
-
- def getGlyphName(self, glyphID):
- """Returns the name for the glyph with the given ID.
-
- If no name is available, synthesises one with the form ``glyphXXXXX``` where
- ```XXXXX`` is the zero-padded glyph ID.
- """
- try:
- return self.getGlyphOrder()[glyphID]
- except IndexError:
- return "glyph%.5d" % glyphID
-
- def getGlyphNameMany(self, lst):
- """Converts a list of glyph IDs into a list of glyph names."""
- glyphOrder = self.getGlyphOrder();
- cnt = len(glyphOrder)
- return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid
- for gid in lst]
-
- def getGlyphID(self, glyphName):
- """Returns the ID of the glyph with the given name."""
- try:
- return self.getReverseGlyphMap()[glyphName]
- except KeyError:
- if glyphName[:5] == "glyph":
- try:
- return int(glyphName[5:])
- except (NameError, ValueError):
- raise KeyError(glyphName)
-
- def getGlyphIDMany(self, lst):
- """Converts a list of glyph names into a list of glyph IDs."""
- d = self.getReverseGlyphMap()
- try:
- return [d[glyphName] for glyphName in lst]
- except KeyError:
- getGlyphID = self.getGlyphID
- return [getGlyphID(glyphName) for glyphName in lst]
-
- def getReverseGlyphMap(self, rebuild=False):
- """Returns a mapping of glyph names to glyph IDs."""
- if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
- self._buildReverseGlyphOrderDict()
- return self._reverseGlyphOrderDict
-
- def _buildReverseGlyphOrderDict(self):
- self._reverseGlyphOrderDict = d = {}
- for glyphID,glyphName in enumerate(self.getGlyphOrder()):
- d[glyphName] = glyphID
- return d
-
- def _writeTable(self, tag, writer, done, tableCache=None):
- """Internal helper function for self.save(). Keeps track of
- inter-table dependencies.
- """
- if tag in done:
- return
- tableClass = getTableClass(tag)
- for masterTable in tableClass.dependencies:
- if masterTable not in done:
- if masterTable in self:
- self._writeTable(masterTable, writer, done, tableCache)
- else:
- done.append(masterTable)
- done.append(tag)
- tabledata = self.getTableData(tag)
- if tableCache is not None:
- entry = tableCache.get((Tag(tag), tabledata))
- if entry is not None:
- log.debug("reusing '%s' table", tag)
- writer.setEntry(tag, entry)
- return
- log.debug("Writing '%s' table to disk", tag)
- writer[tag] = tabledata
- if tableCache is not None:
- tableCache[(Tag(tag), tabledata)] = writer[tag]
-
- def getTableData(self, tag):
- """Returns the binary representation of a table.
-
- If the table is currently loaded and in memory, the data is compiled to
- binary and returned; if it is not currently loaded, the binary data is
- read from the font file and returned.
- """
- tag = Tag(tag)
- if self.isLoaded(tag):
- log.debug("Compiling '%s' table", tag)
- return self.tables[tag].compile(self)
- elif self.reader and tag in self.reader:
- log.debug("Reading '%s' table from disk", tag)
- return self.reader[tag]
- else:
- raise KeyError(tag)
-
- def getGlyphSet(self, preferCFF=True, location=None, normalized=False):
- """Return a generic GlyphSet, which is a dict-like object
- mapping glyph names to glyph objects. The returned glyph objects
- have a .draw() method that supports the Pen protocol, and will
- have an attribute named 'width'.
-
- If the font is CFF-based, the outlines will be taken from the 'CFF ' or
- 'CFF2' tables. Otherwise the outlines will be taken from the 'glyf' table.
- If the font contains both a 'CFF '/'CFF2' and a 'glyf' table, you can use
- the 'preferCFF' argument to specify which one should be taken. If the
- font contains both a 'CFF ' and a 'CFF2' table, the latter is taken.
-
- If the 'location' parameter is set, it should be a dictionary mapping
- four-letter variation tags to their float values, and the returned
- glyph-set will represent an instance of a variable font at that location.
- If the 'normalized' variable is set to True, that location is interpretted
- as in the normalized (-1..+1) space, otherwise it is in the font's defined
- axes space.
- """
- glyphs = None
- if (preferCFF and any(tb in self for tb in ["CFF ", "CFF2"]) or
- ("glyf" not in self and any(tb in self for tb in ["CFF ", "CFF2"]))):
- table_tag = "CFF2" if "CFF2" in self else "CFF "
- if location:
- raise NotImplementedError # TODO
- glyphs = _TTGlyphSet(self,
- list(self[table_tag].cff.values())[0].CharStrings, _TTGlyphCFF)
-
- if glyphs is None and "glyf" in self:
- if location and 'gvar' in self:
- glyphs = _TTVarGlyphSet(self, location=location, normalized=normalized)
- else:
- glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf)
-
- if glyphs is None:
- raise TTLibError("Font contains no outlines")
-
- return glyphs
-
- def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
- """Returns the 'best' Unicode cmap dictionary available in the font
- or ``None``, if no Unicode cmap subtable is available.
-
- By default it will search for the following (platformID, platEncID)
- pairs in order::
-
- (3, 10), # Windows Unicode full repertoire
- (0, 6), # Unicode full repertoire (format 13 subtable)
- (0, 4), # Unicode 2.0 full repertoire
- (3, 1), # Windows Unicode BMP
- (0, 3), # Unicode 2.0 BMP
- (0, 2), # Unicode ISO/IEC 10646
- (0, 1), # Unicode 1.1
- (0, 0) # Unicode 1.0
-
- This particular order matches what HarfBuzz uses to choose what
- subtable to use by default. This order prefers the largest-repertoire
- subtable, and among those, prefers the Windows-platform over the
- Unicode-platform as the former has wider support.
-
- This order can be customized via the ``cmapPreferences`` argument.
- """
- return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
+ """Represents a TrueType font.
+
+ The object manages file input and output, and offers a convenient way of
+    accessing tables. Tables will only be decompiled when necessary, i.e. when
+ they're actually accessed. This means that simple operations can be extremely fast.
+
+ Example usage::
+
+ >> from fontTools import ttLib
+ >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file
+ >> tt['maxp'].numGlyphs
+ 242
+ >> tt['OS/2'].achVendID
+ 'B&H\000'
+ >> tt['head'].unitsPerEm
+ 2048
+
+ For details of the objects returned when accessing each table, see :ref:`tables`.
+ To add a table to the font, use the :py:func:`newTable` function::
+
+ >> os2 = newTable("OS/2")
+ >> os2.version = 4
+ >> # set other attributes
+ >> font["OS/2"] = os2
+
+ TrueType fonts can also be serialized to and from XML format (see also the
+ :ref:`ttx` binary)::
+
+ >> tt.saveXML("afont.ttx")
+ Dumping 'LTSH' table...
+ Dumping 'OS/2' table...
+ [...]
+
+ >> tt2 = ttLib.TTFont() # Create a new font object
+ >> tt2.importXML("afont.ttx")
+ >> tt2['maxp'].numGlyphs
+ 242
+
+ The TTFont object may be used as a context manager; this will cause the file
+ reader to be closed after the context ``with`` block is exited::
+
+ with TTFont(filename) as f:
+ # Do stuff
+
+ Args:
+ file: When reading a font from disk, either a pathname pointing to a file,
+ or a readable file object.
+        res_name_or_index: If running on a Macintosh, either an sfnt resource name or
+ an sfnt resource index number. If the index number is zero, TTLib will
+ autodetect whether the file is a flat file or a suitcase. (If it is a suitcase,
+ only the first 'sfnt' resource will be read.)
+ sfntVersion (str): When constructing a font object from scratch, sets the four-byte
+ sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create
+ an OpenType file, use ``OTTO``.
+ flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2
+ file.
+ checkChecksums (int): How checksum data should be treated. Default is 0
+ (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to
+ raise an exception if any wrong checksums are found.
+ recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``,
+ ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save.
+ Also compiles the glyphs on importing, which saves memory consumption and
+ time.
+ ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation
+ will be ignored, and the binary data will be returned for those tables instead.
+ recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in
+ the ``head`` table on save.
+ fontNumber (int): The index of the font in a TrueType Collection file.
+ lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon
+ access only. If it is set to False, many data structures are loaded immediately.
+ The default is ``lazy=None`` which is somewhere in between.
+ """
+
+ def __init__(
+ self,
+ file=None,
+ res_name_or_index=None,
+ sfntVersion="\000\001\000\000",
+ flavor=None,
+ checkChecksums=0,
+ verbose=None,
+ recalcBBoxes=True,
+ allowVID=NotImplemented,
+ ignoreDecompileErrors=False,
+ recalcTimestamp=True,
+ fontNumber=-1,
+ lazy=None,
+ quiet=None,
+ _tableCache=None,
+ cfg={},
+ ):
+ for name in ("verbose", "quiet"):
+ val = locals().get(name)
+ if val is not None:
+ deprecateArgument(name, "configure logging instead")
+ setattr(self, name, val)
+
+ self.lazy = lazy
+ self.recalcBBoxes = recalcBBoxes
+ self.recalcTimestamp = recalcTimestamp
+ self.tables = {}
+ self.reader = None
+ self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg)
+ self.ignoreDecompileErrors = ignoreDecompileErrors
+
+ if not file:
+ self.sfntVersion = sfntVersion
+ self.flavor = flavor
+ self.flavorData = None
+ return
+ seekable = True
+ if not hasattr(file, "read"):
+ closeStream = True
+ # assume file is a string
+ if res_name_or_index is not None:
+ # see if it contains 'sfnt' resources in the resource or data fork
+ from . import macUtils
+
+ if res_name_or_index == 0:
+ if macUtils.getSFNTResIndices(file):
+ # get the first available sfnt font.
+ file = macUtils.SFNTResourceReader(file, 1)
+ else:
+ file = open(file, "rb")
+ else:
+ file = macUtils.SFNTResourceReader(file, res_name_or_index)
+ else:
+ file = open(file, "rb")
+ else:
+ # assume "file" is a readable file object
+ closeStream = False
+ # SFNTReader wants the input file to be seekable.
+ # SpooledTemporaryFile has no seekable() on < 3.11, but still can seek:
+ # https://github.com/fonttools/fonttools/issues/3052
+ if hasattr(file, "seekable"):
+ seekable = file.seekable()
+ elif hasattr(file, "seek"):
+ try:
+ file.seek(0)
+ except UnsupportedOperation:
+ seekable = False
+
+ if not self.lazy:
+ # read input file in memory and wrap a stream around it to allow overwriting
+ if seekable:
+ file.seek(0)
+ tmp = BytesIO(file.read())
+ if hasattr(file, "name"):
+ # save reference to input file name
+ tmp.name = file.name
+ if closeStream:
+ file.close()
+ file = tmp
+ elif not seekable:
+ raise TTLibError("Input file must be seekable when lazy=True")
+ self._tableCache = _tableCache
+ self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
+ self.sfntVersion = self.reader.sfntVersion
+ self.flavor = self.reader.flavor
+ self.flavorData = self.reader.flavorData
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def close(self):
+ """If we still have a reader object, close it."""
+ if self.reader is not None:
+ self.reader.close()
+
+ def save(self, file, reorderTables=True):
+ """Save the font to disk.
+
+ Args:
+ file: Similarly to the constructor, can be either a pathname or a writable
+ file object.
+ reorderTables (Option[bool]): If true (the default), reorder the tables,
+ sorting them by tag (recommended by the OpenType specification). If
+ false, retain the original font order. If None, reorder by table
+ dependency (fastest).
+ """
+ if not hasattr(file, "write"):
+ if self.lazy and self.reader.file.name == file:
+ raise TTLibError("Can't overwrite TTFont when 'lazy' attribute is True")
+ createStream = True
+ else:
+ # assume "file" is a writable file object
+ createStream = False
+
+ tmp = BytesIO()
+
+ writer_reordersTables = self._save(tmp)
+
+ if not (
+ reorderTables is None
+ or writer_reordersTables
+ or (reorderTables is False and self.reader is None)
+ ):
+ if reorderTables is False:
+ # sort tables using the original font's order
+ tableOrder = list(self.reader.keys())
+ else:
+ # use the recommended order from the OpenType specification
+ tableOrder = None
+ tmp.flush()
+ tmp2 = BytesIO()
+ reorderFontTables(tmp, tmp2, tableOrder)
+ tmp.close()
+ tmp = tmp2
+
+ if createStream:
+ # "file" is a path
+ with open(file, "wb") as file:
+ file.write(tmp.getvalue())
+ else:
+ file.write(tmp.getvalue())
+
+ tmp.close()
+
+ def _save(self, file, tableCache=None):
+ """Internal function, to be shared by save() and TTCollection.save()"""
+
+ if self.recalcTimestamp and "head" in self:
+ self[
+ "head"
+ ] # make sure 'head' is loaded so the recalculation is actually done
+
+ tags = list(self.keys())
+ if "GlyphOrder" in tags:
+ tags.remove("GlyphOrder")
+ numTables = len(tags)
+ # write to a temporary stream to allow saving to unseekable streams
+ writer = SFNTWriter(
+ file, numTables, self.sfntVersion, self.flavor, self.flavorData
+ )
+
+ done = []
+ for tag in tags:
+ self._writeTable(tag, writer, done, tableCache)
+
+ writer.close()
+
+ return writer.reordersTables()
+
+ def saveXML(self, fileOrPath, newlinestr="\n", **kwargs):
+ """Export the font as TTX (an XML-based text file), or as a series of text
+ files when splitTables is true. In the latter case, the 'fileOrPath'
+ argument should be a path to a directory.
+ The 'tables' argument must either be false (dump all tables) or a
+ list of tables to dump. The 'skipTables' argument may be a list of tables
+ to skip, but only when the 'tables' argument is false.
+ """
+
+ writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
+ self._saveXML(writer, **kwargs)
+ writer.close()
+
+ def _saveXML(
+ self,
+ writer,
+ writeVersion=True,
+ quiet=None,
+ tables=None,
+ skipTables=None,
+ splitTables=False,
+ splitGlyphs=False,
+ disassembleInstructions=True,
+ bitmapGlyphDataFormat="raw",
+ ):
+ if quiet is not None:
+ deprecateArgument("quiet", "configure logging instead")
+
+ self.disassembleInstructions = disassembleInstructions
+ self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
+ if not tables:
+ tables = list(self.keys())
+ if "GlyphOrder" not in tables:
+ tables = ["GlyphOrder"] + tables
+ if skipTables:
+ for tag in skipTables:
+ if tag in tables:
+ tables.remove(tag)
+ numTables = len(tables)
+
+ if writeVersion:
+ from fontTools import version
+
+ version = ".".join(version.split(".")[:2])
+ writer.begintag(
+ "ttFont",
+ sfntVersion=repr(tostr(self.sfntVersion))[1:-1],
+ ttLibVersion=version,
+ )
+ else:
+ writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1])
+ writer.newline()
+
+ # always splitTables if splitGlyphs is enabled
+ splitTables = splitTables or splitGlyphs
+
+ if not splitTables:
+ writer.newline()
+ else:
+ path, ext = os.path.splitext(writer.filename)
+
+ for i in range(numTables):
+ tag = tables[i]
+ if splitTables:
+ tablePath = path + "." + tagToIdentifier(tag) + ext
+ tableWriter = xmlWriter.XMLWriter(
+ tablePath, newlinestr=writer.newlinestr
+ )
+ tableWriter.begintag("ttFont", ttLibVersion=version)
+ tableWriter.newline()
+ tableWriter.newline()
+ writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
+ writer.newline()
+ else:
+ tableWriter = writer
+ self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs)
+ if splitTables:
+ tableWriter.endtag("ttFont")
+ tableWriter.newline()
+ tableWriter.close()
+ writer.endtag("ttFont")
+ writer.newline()
+
+ def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False):
+ if quiet is not None:
+ deprecateArgument("quiet", "configure logging instead")
+ if tag in self:
+ table = self[tag]
+ report = "Dumping '%s' table..." % tag
+ else:
+ report = "No '%s' table found." % tag
+ log.info(report)
+ if tag not in self:
+ return
+ xmlTag = tagToXML(tag)
+ attrs = dict()
+ if hasattr(table, "ERROR"):
+ attrs["ERROR"] = "decompilation error"
+ from .tables.DefaultTable import DefaultTable
+
+ if table.__class__ == DefaultTable:
+ attrs["raw"] = True
+ writer.begintag(xmlTag, **attrs)
+ writer.newline()
+ if tag == "glyf":
+ table.toXML(writer, self, splitGlyphs=splitGlyphs)
+ else:
+ table.toXML(writer, self)
+ writer.endtag(xmlTag)
+ writer.newline()
+ writer.newline()
+
+ def importXML(self, fileOrPath, quiet=None):
+ """Import a TTX file (an XML-based text format), so as to recreate
+ a font object.
+ """
+ if quiet is not None:
+ deprecateArgument("quiet", "configure logging instead")
+
+ if "maxp" in self and "post" in self:
+ # Make sure the glyph order is loaded, as it otherwise gets
+ # lost if the XML doesn't contain the glyph order, yet does
+ # contain the table which was originally used to extract the
+ # glyph names from (ie. 'post', 'cmap' or 'CFF ').
+ self.getGlyphOrder()
+
+ from fontTools.misc import xmlReader
+
+ reader = xmlReader.XMLReader(fileOrPath, self)
+ reader.read()
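As a minimal sketch of how saveXML() and importXML() fit together (file names are hypothetical, not taken from the patch):

    from fontTools.ttLib import TTFont

    font = TTFont("Input.ttf")        # hypothetical input path
    font.saveXML("Input.ttx")         # dump every table as TTX

    rebuilt = TTFont()                # start from an empty font object
    rebuilt.importXML("Input.ttx")    # recreate the tables from the TTX dump
    rebuilt.save("Rebuilt.ttf")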
+
+ def isLoaded(self, tag):
+ """Return true if the table identified by ``tag`` has been
+ decompiled and loaded into memory."""
+ return tag in self.tables
+
+ def has_key(self, tag):
+ """Test if the table identified by ``tag`` is present in the font.
+
+ In addition to this method, ``tag in font`` can be used to determine the
+ presence of the table."""
+ if self.isLoaded(tag):
+ return True
+ elif self.reader and tag in self.reader:
+ return True
+ elif tag == "GlyphOrder":
+ return True
+ else:
+ return False
+
+ __contains__ = has_key
+
+ def keys(self):
+ """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table."""
+ keys = list(self.tables.keys())
+ if self.reader:
+ for key in list(self.reader.keys()):
+ if key not in keys:
+ keys.append(key)
+
+ if "GlyphOrder" in keys:
+ keys.remove("GlyphOrder")
+ keys = sortedTagList(keys)
+ return ["GlyphOrder"] + keys
+
+ def ensureDecompiled(self, recurse=None):
+ """Decompile all the tables, even if a TTFont was opened in 'lazy' mode."""
+ for tag in self.keys():
+ table = self[tag]
+ if recurse is None:
+ recurse = self.lazy is not False
+ if recurse and hasattr(table, "ensureDecompiled"):
+ table.ensureDecompiled(recurse=recurse)
+ self.lazy = False
+
+ def __len__(self):
+ return len(list(self.keys()))
+
+ def __getitem__(self, tag):
+ tag = Tag(tag)
+ table = self.tables.get(tag)
+ if table is None:
+ if tag == "GlyphOrder":
+ table = GlyphOrder(tag)
+ self.tables[tag] = table
+ elif self.reader is not None:
+ table = self._readTable(tag)
+ else:
+ raise KeyError("'%s' table not found" % tag)
+ return table
+
+ def _readTable(self, tag):
+ log.debug("Reading '%s' table from disk", tag)
+ data = self.reader[tag]
+ if self._tableCache is not None:
+ table = self._tableCache.get((tag, data))
+ if table is not None:
+ return table
+ tableClass = getTableClass(tag)
+ table = tableClass(tag)
+ self.tables[tag] = table
+ log.debug("Decompiling '%s' table", tag)
+ try:
+ table.decompile(data, self)
+ except Exception:
+ if not self.ignoreDecompileErrors:
+ raise
+ # fall back to DefaultTable, retaining the binary table data
+ log.exception(
+ "An exception occurred during the decompilation of the '%s' table", tag
+ )
+ from .tables.DefaultTable import DefaultTable
+
+ file = StringIO()
+ traceback.print_exc(file=file)
+ table = DefaultTable(tag)
+ table.ERROR = file.getvalue()
+ self.tables[tag] = table
+ table.decompile(data, self)
+ if self._tableCache is not None:
+ self._tableCache[(tag, data)] = table
+ return table
+
+ def __setitem__(self, tag, table):
+ self.tables[Tag(tag)] = table
+
+ def __delitem__(self, tag):
+ if tag not in self:
+ raise KeyError("'%s' table not found" % tag)
+ if tag in self.tables:
+ del self.tables[tag]
+ if self.reader and tag in self.reader:
+ del self.reader[tag]
+
+ def get(self, tag, default=None):
+ """Returns the table if it exists or (optionally) a default if it doesn't."""
+ try:
+ return self[tag]
+ except KeyError:
+ return default
+
+ def setGlyphOrder(self, glyphOrder):
+ """Set the glyph order
+
+ Args:
+ glyphOrder ([str]): List of glyph names in order.
+ """
+ self.glyphOrder = glyphOrder
+ if hasattr(self, "_reverseGlyphOrderDict"):
+ del self._reverseGlyphOrderDict
+ if self.isLoaded("glyf"):
+ self["glyf"].setGlyphOrder(glyphOrder)
+
+ def getGlyphOrder(self):
+ """Returns a list of glyph names ordered by their position in the font."""
+ try:
+ return self.glyphOrder
+ except AttributeError:
+ pass
+ if "CFF " in self:
+ cff = self["CFF "]
+ self.glyphOrder = cff.getGlyphOrder()
+ elif "post" in self:
+ # TrueType font
+ glyphOrder = self["post"].getGlyphOrder()
+ if glyphOrder is None:
+ #
+ # No names found in the 'post' table.
+ # Try to create glyph names from the unicode cmap (if available)
+ # in combination with the Adobe Glyph List (AGL).
+ #
+ self._getGlyphNamesFromCmap()
+ elif len(glyphOrder) < self["maxp"].numGlyphs:
+ #
+ # Not enough names found in the 'post' table.
+ # Can happen when 'post' format 1 is improperly used on a font that
+ # has more than 258 glyphs (the length of 'standardGlyphOrder').
+ #
+ log.warning(
+ "Not enough names found in the 'post' table, generating them from cmap instead"
+ )
+ self._getGlyphNamesFromCmap()
+ else:
+ self.glyphOrder = glyphOrder
+ else:
+ self._getGlyphNamesFromCmap()
+ return self.glyphOrder
+
+ def _getGlyphNamesFromCmap(self):
+ #
+ # This is rather convoluted, but then again, it's an interesting problem:
+ # - we need to use the unicode values found in the cmap table to
+ # build glyph names (eg. because there is only a minimal post table,
+ # or none at all).
+ # - but the cmap parser also needs glyph names to work with...
+ # So here's what we do:
+ # - make up glyph names based on glyphID
+ # - load a temporary cmap table based on those names
+ # - extract the unicode values, build the "real" glyph names
+ # - unload the temporary cmap table
+ #
+ if self.isLoaded("cmap"):
+ # Bootstrapping: we're getting called by the cmap parser
+ # itself. This means self.tables['cmap'] contains a partially
+ # loaded cmap, making it impossible to get at a unicode
+ # subtable here. We remove the partially loaded cmap and
+ # restore it later.
+ # This only happens if the cmap table is loaded before any
+ # other table that does f.getGlyphOrder() or f.getGlyphName().
+ cmapLoading = self.tables["cmap"]
+ del self.tables["cmap"]
+ else:
+ cmapLoading = None
+ # Make up glyph names based on glyphID, which will be used by the
+ # temporary cmap and by the real cmap in case we don't find a unicode
+ # cmap.
+ numGlyphs = int(self["maxp"].numGlyphs)
+ glyphOrder = [None] * numGlyphs
+ glyphOrder[0] = ".notdef"
+ for i in range(1, numGlyphs):
+ glyphOrder[i] = "glyph%.5d" % i
+ # Set the glyph order, so the cmap parser has something
+ # to work with (so we don't get called recursively).
+ self.glyphOrder = glyphOrder
+
+ # Make up glyph names based on the reversed cmap table. Because some
+ # glyphs (eg. ligatures or alternates) may not be reachable via cmap,
+ # this naming table will usually not cover all glyphs in the font.
+ # If the font has no Unicode cmap table, reversecmap will be empty.
+ if "cmap" in self:
+ reversecmap = self["cmap"].buildReversed()
+ else:
+ reversecmap = {}
+ useCount = {}
+ for i in range(numGlyphs):
+ tempName = glyphOrder[i]
+ if tempName in reversecmap:
+ # If a font maps both U+0041 LATIN CAPITAL LETTER A and
+ # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph,
+ # we prefer naming the glyph as "A".
+ glyphName = self._makeGlyphName(min(reversecmap[tempName]))
+ numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1
+ if numUses > 1:
+ glyphName = "%s.alt%d" % (glyphName, numUses - 1)
+ glyphOrder[i] = glyphName
+
+ if "cmap" in self:
+ # Delete the temporary cmap table from the cache, so it can
+ # be parsed again with the right names.
+ del self.tables["cmap"]
+ self.glyphOrder = glyphOrder
+ if cmapLoading:
+ # restore partially loaded cmap, so it can continue loading
+ # using the proper names.
+ self.tables["cmap"] = cmapLoading
+
+ @staticmethod
+ def _makeGlyphName(codepoint):
+ from fontTools import agl # Adobe Glyph List
+
+ if codepoint in agl.UV2AGL:
+ return agl.UV2AGL[codepoint]
+ elif codepoint <= 0xFFFF:
+ return "uni%04X" % codepoint
+ else:
+ return "u%X" % codepoint
+
+ def getGlyphNames(self):
+ """Get a list of glyph names, sorted alphabetically."""
+ glyphNames = sorted(self.getGlyphOrder())
+ return glyphNames
+
+ def getGlyphNames2(self):
+ """Get a list of glyph names, sorted alphabetically,
+ but not case sensitive.
+ """
+ from fontTools.misc import textTools
+
+ return textTools.caselessSort(self.getGlyphOrder())
+
+ def getGlyphName(self, glyphID):
+ """Returns the name for the glyph with the given ID.
+
+ If no name is available, synthesises one with the form ``glyphXXXXX``, where
+ ``XXXXX`` is the zero-padded glyph ID.
+ """
+ try:
+ return self.getGlyphOrder()[glyphID]
+ except IndexError:
+ return "glyph%.5d" % glyphID
+
+ def getGlyphNameMany(self, lst):
+ """Converts a list of glyph IDs into a list of glyph names."""
+ glyphOrder = self.getGlyphOrder()
+ cnt = len(glyphOrder)
+ return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid for gid in lst]
+
+ def getGlyphID(self, glyphName):
+ """Returns the ID of the glyph with the given name."""
+ try:
+ return self.getReverseGlyphMap()[glyphName]
+ except KeyError:
+ if glyphName[:5] == "glyph":
+ try:
+ return int(glyphName[5:])
+ except (NameError, ValueError):
+ raise KeyError(glyphName)
+ raise
+
+ def getGlyphIDMany(self, lst):
+ """Converts a list of glyph names into a list of glyph IDs."""
+ d = self.getReverseGlyphMap()
+ try:
+ return [d[glyphName] for glyphName in lst]
+ except KeyError:
+ getGlyphID = self.getGlyphID
+ return [getGlyphID(glyphName) for glyphName in lst]
+
+ def getReverseGlyphMap(self, rebuild=False):
+ """Returns a mapping of glyph names to glyph IDs."""
+ if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
+ self._buildReverseGlyphOrderDict()
+ return self._reverseGlyphOrderDict
+
+ def _buildReverseGlyphOrderDict(self):
+ self._reverseGlyphOrderDict = d = {}
+ for glyphID, glyphName in enumerate(self.getGlyphOrder()):
+ d[glyphName] = glyphID
+ return d
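A minimal sketch of the name/ID mapping methods above; the font path and the glyph name "A" are assumptions:

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")            # hypothetical path
    gid = font.getGlyphID("A")             # name -> glyph ID, via getReverseGlyphMap()
    assert font.getGlyphName(gid) == "A"   # glyph ID -> name
    # Synthetic "glyphNNNNN" names fall back to the numeric ID (assuming no real
    # glyph actually carries that name):
    assert font.getGlyphID("glyph12345") == 12345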
+
+ def _writeTable(self, tag, writer, done, tableCache=None):
+ """Internal helper function for self.save(). Keeps track of
+ inter-table dependencies.
+ """
+ if tag in done:
+ return
+ tableClass = getTableClass(tag)
+ for masterTable in tableClass.dependencies:
+ if masterTable not in done:
+ if masterTable in self:
+ self._writeTable(masterTable, writer, done, tableCache)
+ else:
+ done.append(masterTable)
+ done.append(tag)
+ tabledata = self.getTableData(tag)
+ if tableCache is not None:
+ entry = tableCache.get((Tag(tag), tabledata))
+ if entry is not None:
+ log.debug("reusing '%s' table", tag)
+ writer.setEntry(tag, entry)
+ return
+ log.debug("Writing '%s' table to disk", tag)
+ writer[tag] = tabledata
+ if tableCache is not None:
+ tableCache[(Tag(tag), tabledata)] = writer[tag]
+
+ def getTableData(self, tag):
+ """Returns the binary representation of a table.
+
+ If the table is currently loaded and in memory, the data is compiled to
+ binary and returned; if it is not currently loaded, the binary data is
+ read from the font file and returned.
+ """
+ tag = Tag(tag)
+ if self.isLoaded(tag):
+ log.debug("Compiling '%s' table", tag)
+ return self.tables[tag].compile(self)
+ elif self.reader and tag in self.reader:
+ log.debug("Reading '%s' table from disk", tag)
+ return self.reader[tag]
+ else:
+ raise KeyError(tag)
+
+ def getGlyphSet(self, preferCFF=True, location=None, normalized=False):
+ """Return a generic GlyphSet, which is a dict-like object
+ mapping glyph names to glyph objects. The returned glyph objects
+ have a ``.draw()`` method that supports the Pen protocol, and will
+ have an attribute named 'width'.
+
+ If the font is CFF-based, the outlines will be taken from the ``CFF ``
+ or ``CFF2`` tables. Otherwise the outlines will be taken from the
+ ``glyf`` table.
+
+ If the font contains both a ``CFF ``/``CFF2`` and a ``glyf`` table, you
+ can use the ``preferCFF`` argument to specify which one should be taken.
+ If the font contains both a ``CFF `` and a ``CFF2`` table, the latter is
+ taken.
+
+ If the ``location`` parameter is set, it should be a dictionary mapping
+ four-letter variation tags to their float values, and the returned
+ glyph-set will represent an instance of a variable font at that
+ location.
+
+ If the ``normalized`` argument is set to True, the location is
+ interpreted as being in the normalized (-1..+1) space; otherwise it is in the
+ font's defined axes (user) space.
+ """
+ if location and "fvar" not in self:
+ location = None
+ if location and not normalized:
+ location = self.normalizeLocation(location)
+ if ("CFF " in self or "CFF2" in self) and (preferCFF or "glyf" not in self):
+ return _TTGlyphSetCFF(self, location)
+ elif "glyf" in self:
+ return _TTGlyphSetGlyf(self, location)
+ else:
+ raise TTLibError("Font contains no outlines")
+
+ def normalizeLocation(self, location):
+ """Normalize a ``location`` from the font's defined axes space (also
+ known as user space) into the normalized (-1..+1) space. It applies
+ ``avar`` mapping if the font contains an ``avar`` table.
+
+ The ``location`` parameter should be a dictionary mapping four-letter
+ variation tags to their float values.
+
+ Raises ``TTLibError`` if the font is not a variable font.
+ """
+ from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap
+
+ if "fvar" not in self:
+ raise TTLibError("Not a variable font")
+
+ axes = {
+ a.axisTag: (a.minValue, a.defaultValue, a.maxValue)
+ for a in self["fvar"].axes
+ }
+ location = normalizeLocation(location, axes)
+ if "avar" in self:
+ avar = self["avar"]
+ avarSegments = avar.segments
+ mappedLocation = {}
+ for axisTag, value in location.items():
+ avarMapping = avarSegments.get(axisTag, None)
+ if avarMapping is not None:
+ value = piecewiseLinearMap(value, avarMapping)
+ mappedLocation[axisTag] = value
+ location = mappedLocation
+ return location
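For example, assuming a hypothetical font with a single 'wght' axis whose (min, default, max) is (100, 400, 900) and no ``avar`` table:

    from fontTools.ttLib import TTFont

    font = TTFont("MyVariable.ttf")    # hypothetical single-axis font, no 'avar'
    assert font.normalizeLocation({"wght": 900}) == {"wght": 1.0}
    assert font.normalizeLocation({"wght": 250}) == {"wght": -0.5}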
+
+ def getBestCmap(
+ self,
+ cmapPreferences=(
+ (3, 10),
+ (0, 6),
+ (0, 4),
+ (3, 1),
+ (0, 3),
+ (0, 2),
+ (0, 1),
+ (0, 0),
+ ),
+ ):
+ """Returns the 'best' Unicode cmap dictionary available in the font
+ or ``None``, if no Unicode cmap subtable is available.
+
+ By default it will search for the following (platformID, platEncID)
+ pairs in order::
+
+ (3, 10), # Windows Unicode full repertoire
+ (0, 6), # Unicode full repertoire (format 13 subtable)
+ (0, 4), # Unicode 2.0 full repertoire
+ (3, 1), # Windows Unicode BMP
+ (0, 3), # Unicode 2.0 BMP
+ (0, 2), # Unicode ISO/IEC 10646
+ (0, 1), # Unicode 1.1
+ (0, 0) # Unicode 1.0
+
+ This particular order matches what HarfBuzz uses to choose what
+ subtable to use by default. This order prefers the largest-repertoire
+ subtable, and among those, prefers the Windows-platform over the
+ Unicode-platform as the former has wider support.
+
+ This order can be customized via the ``cmapPreferences`` argument.
+ """
+ return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
class GlyphOrder(object):
- """A pseudo table. The glyph order isn't in the font as a separate
- table, but it's nice to present it as such in the TTX format.
- """
+ """A pseudo table. The glyph order isn't in the font as a separate
+ table, but it's nice to present it as such in the TTX format.
+ """
- def __init__(self, tag=None):
- pass
+ def __init__(self, tag=None):
+ pass
- def toXML(self, writer, ttFont):
- glyphOrder = ttFont.getGlyphOrder()
- writer.comment("The 'id' attribute is only for humans; "
- "it is ignored when parsed.")
- writer.newline()
- for i in range(len(glyphOrder)):
- glyphName = glyphOrder[i]
- writer.simpletag("GlyphID", id=i, name=glyphName)
- writer.newline()
+ def toXML(self, writer, ttFont):
+ glyphOrder = ttFont.getGlyphOrder()
+ writer.comment(
+ "The 'id' attribute is only for humans; " "it is ignored when parsed."
+ )
+ writer.newline()
+ for i in range(len(glyphOrder)):
+ glyphName = glyphOrder[i]
+ writer.simpletag("GlyphID", id=i, name=glyphName)
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "glyphOrder"):
- self.glyphOrder = []
- if name == "GlyphID":
- self.glyphOrder.append(attrs["name"])
- ttFont.setGlyphOrder(self.glyphOrder)
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "glyphOrder"):
+ self.glyphOrder = []
+ if name == "GlyphID":
+ self.glyphOrder.append(attrs["name"])
+ ttFont.setGlyphOrder(self.glyphOrder)
def getTableModule(tag):
- """Fetch the packer/unpacker module for a table.
- Return None when no module is found.
- """
- from . import tables
- pyTag = tagToIdentifier(tag)
- try:
- __import__("fontTools.ttLib.tables." + pyTag)
- except ImportError as err:
- # If pyTag is found in the ImportError message,
- # means table is not implemented. If it's not
- # there, then some other module is missing, don't
- # suppress the error.
- if str(err).find(pyTag) >= 0:
- return None
- else:
- raise err
- else:
- return getattr(tables, pyTag)
+ """Fetch the packer/unpacker module for a table.
+ Return None when no module is found.
+ """
+ from . import tables
+
+ pyTag = tagToIdentifier(tag)
+ try:
+ __import__("fontTools.ttLib.tables." + pyTag)
+ except ImportError as err:
+ # If pyTag is found in the ImportError message, it
+ # means the table is not implemented. If it's not
+ # there, then some other module is missing; don't
+ # suppress the error.
+ if str(err).find(pyTag) >= 0:
+ return None
+ else:
+ raise err
+ else:
+ return getattr(tables, pyTag)
# Registry for custom table packer/unpacker classes. Keys are table
@@ -800,221 +897,248 @@ _customTableRegistry = {}
def registerCustomTableClass(tag, moduleName, className=None):
- """Register a custom packer/unpacker class for a table.
+ """Register a custom packer/unpacker class for a table.
- The 'moduleName' must be an importable module. If no 'className'
- is given, it is derived from the tag, for example it will be
- ``table_C_U_S_T_`` for a 'CUST' tag.
+ The 'moduleName' must be an importable module. If no 'className'
+ is given, it is derived from the tag, for example it will be
+ ``table_C_U_S_T_`` for a 'CUST' tag.
- The registered table class should be a subclass of
- :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable`
- """
- if className is None:
- className = "table_" + tagToIdentifier(tag)
- _customTableRegistry[tag] = (moduleName, className)
+ The registered table class should be a subclass of
+ :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable`
+ """
+ if className is None:
+ className = "table_" + tagToIdentifier(tag)
+ _customTableRegistry[tag] = (moduleName, className)
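A minimal sketch of registering a custom table class; registering against ``"__main__"`` is only for the demo, a real project would pass its own importable module name:

    from fontTools.ttLib import registerCustomTableClass, newTable
    from fontTools.ttLib.tables.DefaultTable import DefaultTable

    class table_C_U_S_T_(DefaultTable):   # name follows the tagToIdentifier() convention
        pass

    registerCustomTableClass("CUST", "__main__")  # className defaults to "table_C_U_S_T_"
    assert type(newTable("CUST")) is table_C_U_S_T_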
def unregisterCustomTableClass(tag):
- """Unregister the custom packer/unpacker class for a table."""
- del _customTableRegistry[tag]
+ """Unregister the custom packer/unpacker class for a table."""
+ del _customTableRegistry[tag]
def getCustomTableClass(tag):
- """Return the custom table class for tag, if one has been registered
- with 'registerCustomTableClass()'. Else return None.
- """
- if tag not in _customTableRegistry:
- return None
- import importlib
- moduleName, className = _customTableRegistry[tag]
- module = importlib.import_module(moduleName)
- return getattr(module, className)
+ """Return the custom table class for tag, if one has been registered
+ with 'registerCustomTableClass()'. Else return None.
+ """
+ if tag not in _customTableRegistry:
+ return None
+ import importlib
+
+ moduleName, className = _customTableRegistry[tag]
+ module = importlib.import_module(moduleName)
+ return getattr(module, className)
def getTableClass(tag):
- """Fetch the packer/unpacker class for a table."""
- tableClass = getCustomTableClass(tag)
- if tableClass is not None:
- return tableClass
- module = getTableModule(tag)
- if module is None:
- from .tables.DefaultTable import DefaultTable
- return DefaultTable
- pyTag = tagToIdentifier(tag)
- tableClass = getattr(module, "table_" + pyTag)
- return tableClass
+ """Fetch the packer/unpacker class for a table."""
+ tableClass = getCustomTableClass(tag)
+ if tableClass is not None:
+ return tableClass
+ module = getTableModule(tag)
+ if module is None:
+ from .tables.DefaultTable import DefaultTable
+
+ return DefaultTable
+ pyTag = tagToIdentifier(tag)
+ tableClass = getattr(module, "table_" + pyTag)
+ return tableClass
def getClassTag(klass):
- """Fetch the table tag for a class object."""
- name = klass.__name__
- assert name[:6] == 'table_'
- name = name[6:] # Chop 'table_'
- return identifierToTag(name)
+ """Fetch the table tag for a class object."""
+ name = klass.__name__
+ assert name[:6] == "table_"
+ name = name[6:] # Chop 'table_'
+ return identifierToTag(name)
def newTable(tag):
- """Return a new instance of a table."""
- tableClass = getTableClass(tag)
- return tableClass(tag)
+ """Return a new instance of a table."""
+ tableClass = getTableClass(tag)
+ return tableClass(tag)
def _escapechar(c):
- """Helper function for tagToIdentifier()"""
- import re
- if re.match("[a-z0-9]", c):
- return "_" + c
- elif re.match("[A-Z]", c):
- return c + "_"
- else:
- return hex(byteord(c))[2:]
+ """Helper function for tagToIdentifier()"""
+ import re
+
+ if re.match("[a-z0-9]", c):
+ return "_" + c
+ elif re.match("[A-Z]", c):
+ return c + "_"
+ else:
+ return hex(byteord(c))[2:]
def tagToIdentifier(tag):
- """Convert a table tag to a valid (but UGLY) python identifier,
- as well as a filename that's guaranteed to be unique even on a
- caseless file system. Each character is mapped to two characters.
- Lowercase letters get an underscore before the letter, uppercase
- letters get an underscore after the letter. Trailing spaces are
- trimmed. Illegal characters are escaped as two hex bytes. If the
- result starts with a number (as the result of a hex escape), an
- extra underscore is prepended. Examples::
-
- >>> tagToIdentifier('glyf')
- '_g_l_y_f'
- >>> tagToIdentifier('cvt ')
- '_c_v_t'
- >>> tagToIdentifier('OS/2')
- 'O_S_2f_2'
- """
- import re
- tag = Tag(tag)
- if tag == "GlyphOrder":
- return tag
- assert len(tag) == 4, "tag should be 4 characters long"
- while len(tag) > 1 and tag[-1] == ' ':
- tag = tag[:-1]
- ident = ""
- for c in tag:
- ident = ident + _escapechar(c)
- if re.match("[0-9]", ident):
- ident = "_" + ident
- return ident
+ """Convert a table tag to a valid (but UGLY) python identifier,
+ as well as a filename that's guaranteed to be unique even on a
+ caseless file system. Each character is mapped to two characters.
+ Lowercase letters get an underscore before the letter, uppercase
+ letters get an underscore after the letter. Trailing spaces are
+ trimmed. Illegal characters are escaped as two hex bytes. If the
+ result starts with a number (as the result of a hex escape), an
+ extra underscore is prepended. Examples::
+
+ >>> tagToIdentifier('glyf')
+ '_g_l_y_f'
+ >>> tagToIdentifier('cvt ')
+ '_c_v_t'
+ >>> tagToIdentifier('OS/2')
+ 'O_S_2f_2'
+ """
+ import re
+
+ tag = Tag(tag)
+ if tag == "GlyphOrder":
+ return tag
+ assert len(tag) == 4, "tag should be 4 characters long"
+ while len(tag) > 1 and tag[-1] == " ":
+ tag = tag[:-1]
+ ident = ""
+ for c in tag:
+ ident = ident + _escapechar(c)
+ if re.match("[0-9]", ident):
+ ident = "_" + ident
+ return ident
def identifierToTag(ident):
- """the opposite of tagToIdentifier()"""
- if ident == "GlyphOrder":
- return ident
- if len(ident) % 2 and ident[0] == "_":
- ident = ident[1:]
- assert not (len(ident) % 2)
- tag = ""
- for i in range(0, len(ident), 2):
- if ident[i] == "_":
- tag = tag + ident[i+1]
- elif ident[i+1] == "_":
- tag = tag + ident[i]
- else:
- # assume hex
- tag = tag + chr(int(ident[i:i+2], 16))
- # append trailing spaces
- tag = tag + (4 - len(tag)) * ' '
- return Tag(tag)
+ """the opposite of tagToIdentifier()"""
+ if ident == "GlyphOrder":
+ return ident
+ if len(ident) % 2 and ident[0] == "_":
+ ident = ident[1:]
+ assert not (len(ident) % 2)
+ tag = ""
+ for i in range(0, len(ident), 2):
+ if ident[i] == "_":
+ tag = tag + ident[i + 1]
+ elif ident[i + 1] == "_":
+ tag = tag + ident[i]
+ else:
+ # assume hex
+ tag = tag + chr(int(ident[i : i + 2], 16))
+ # append trailing spaces
+ tag = tag + (4 - len(tag)) * " "
+ return Tag(tag)
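The two helpers round-trip, as a quick sanity check shows:

    from fontTools.ttLib import tagToIdentifier, identifierToTag

    for tag in ("glyf", "cvt ", "OS/2"):
        ident = tagToIdentifier(tag)       # '_g_l_y_f', '_c_v_t', 'O_S_2f_2'
        assert identifierToTag(ident) == tag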
def tagToXML(tag):
- """Similarly to tagToIdentifier(), this converts a TT tag
- to a valid XML element name. Since XML element names are
- case sensitive, this is a fairly simple/readable translation.
- """
- import re
- tag = Tag(tag)
- if tag == "OS/2":
- return "OS_2"
- elif tag == "GlyphOrder":
- return tag
- if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
- return tag.strip()
- else:
- return tagToIdentifier(tag)
+ """Similarly to tagToIdentifier(), this converts a TT tag
+ to a valid XML element name. Since XML element names are
+ case sensitive, this is a fairly simple/readable translation.
+ """
+ import re
+
+ tag = Tag(tag)
+ if tag == "OS/2":
+ return "OS_2"
+ elif tag == "GlyphOrder":
+ return tag
+ if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
+ return tag.strip()
+ else:
+ return tagToIdentifier(tag)
def xmlToTag(tag):
- """The opposite of tagToXML()"""
- if tag == "OS_2":
- return Tag("OS/2")
- if len(tag) == 8:
- return identifierToTag(tag)
- else:
- return Tag(tag + " " * (4 - len(tag)))
-
+ """The opposite of tagToXML()"""
+ if tag == "OS_2":
+ return Tag("OS/2")
+ if len(tag) == 8:
+ return identifierToTag(tag)
+ else:
+ return Tag(tag + " " * (4 - len(tag)))
# Table order as recommended in the OpenType specification 1.4
-TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX",
- "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf",
- "kern", "name", "post", "gasp", "PCLT"]
+TTFTableOrder = [
+ "head",
+ "hhea",
+ "maxp",
+ "OS/2",
+ "hmtx",
+ "LTSH",
+ "VDMX",
+ "hdmx",
+ "cmap",
+ "fpgm",
+ "prep",
+ "cvt ",
+ "loca",
+ "glyf",
+ "kern",
+ "name",
+ "post",
+ "gasp",
+ "PCLT",
+]
+
+OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", "CFF "]
-OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post",
- "CFF "]
def sortedTagList(tagList, tableOrder=None):
- """Return a sorted copy of tagList, sorted according to the OpenType
- specification, or according to a custom tableOrder. If given and not
- None, tableOrder needs to be a list of tag names.
- """
- tagList = sorted(tagList)
- if tableOrder is None:
- if "DSIG" in tagList:
- # DSIG should be last (XXX spec reference?)
- tagList.remove("DSIG")
- tagList.append("DSIG")
- if "CFF " in tagList:
- tableOrder = OTFTableOrder
- else:
- tableOrder = TTFTableOrder
- orderedTables = []
- for tag in tableOrder:
- if tag in tagList:
- orderedTables.append(tag)
- tagList.remove(tag)
- orderedTables.extend(tagList)
- return orderedTables
+ """Return a sorted copy of tagList, sorted according to the OpenType
+ specification, or according to a custom tableOrder. If given and not
+ None, tableOrder needs to be a list of tag names.
+ """
+ tagList = sorted(tagList)
+ if tableOrder is None:
+ if "DSIG" in tagList:
+ # DSIG should be last (XXX spec reference?)
+ tagList.remove("DSIG")
+ tagList.append("DSIG")
+ if "CFF " in tagList:
+ tableOrder = OTFTableOrder
+ else:
+ tableOrder = TTFTableOrder
+ orderedTables = []
+ for tag in tableOrder:
+ if tag in tagList:
+ orderedTables.append(tag)
+ tagList.remove(tag)
+ orderedTables.extend(tagList)
+ return orderedTables
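For example, with a small TrueType-flavoured tag list (the exact set of tags is arbitrary):

    from fontTools.ttLib import sortedTagList

    tags = ["glyf", "DSIG", "cmap", "head", "loca", "maxp"]
    assert sortedTagList(tags) == ["head", "maxp", "cmap", "loca", "glyf", "DSIG"]
    # 'head'..'glyf' follow TTFTableOrder; 'DSIG' is always pushed to the end.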
def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False):
- """Rewrite a font file, ordering the tables as recommended by the
- OpenType specification 1.4.
- """
- inFile.seek(0)
- outFile.seek(0)
- reader = SFNTReader(inFile, checkChecksums=checkChecksums)
- writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData)
- tables = list(reader.keys())
- for tag in sortedTagList(tables, tableOrder):
- writer[tag] = reader[tag]
- writer.close()
+ """Rewrite a font file, ordering the tables as recommended by the
+ OpenType specification 1.4.
+ """
+ inFile.seek(0)
+ outFile.seek(0)
+ reader = SFNTReader(inFile, checkChecksums=checkChecksums)
+ writer = SFNTWriter(
+ outFile,
+ len(reader.tables),
+ reader.sfntVersion,
+ reader.flavor,
+ reader.flavorData,
+ )
+ tables = list(reader.keys())
+ for tag in sortedTagList(tables, tableOrder):
+ writer[tag] = reader[tag]
+ writer.close()
def maxPowerOfTwo(x):
- """Return the highest exponent of two, so that
- (2 ** exponent) <= x. Return 0 if x is 0.
- """
- exponent = 0
- while x:
- x = x >> 1
- exponent = exponent + 1
- return max(exponent - 1, 0)
+ """Return the highest exponent of two, so that
+ (2 ** exponent) <= x. Return 0 if x is 0.
+ """
+ exponent = 0
+ while x:
+ x = x >> 1
+ exponent = exponent + 1
+ return max(exponent - 1, 0)
def getSearchRange(n, itemSize=16):
- """Calculate searchRange, entrySelector, rangeShift.
- """
- # itemSize defaults to 16, for backward compatibility
- # with upstream fonttools.
- exponent = maxPowerOfTwo(n)
- searchRange = (2 ** exponent) * itemSize
- entrySelector = exponent
- rangeShift = max(0, n * itemSize - searchRange)
- return searchRange, entrySelector, rangeShift
+ """Calculate searchRange, entrySelector, rangeShift."""
+ # itemSize defaults to 16, for backward compatibility
+ # with upstream fonttools.
+ exponent = maxPowerOfTwo(n)
+ searchRange = (2**exponent) * itemSize
+ entrySelector = exponent
+ rangeShift = max(0, n * itemSize - searchRange)
+ return searchRange, entrySelector, rangeShift
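A worked example for a 10-table font, using the default itemSize of 16:

    from fontTools.ttLib import maxPowerOfTwo, getSearchRange

    assert maxPowerOfTwo(10) == 3               # 2**3 = 8 <= 10 < 16
    assert getSearchRange(10) == (128, 3, 32)   # searchRange = 8 * 16, rangeShift = 10 * 16 - 128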
diff --git a/Lib/fontTools/ttLib/ttGlyphSet.py b/Lib/fontTools/ttLib/ttGlyphSet.py
index be26215b..d4384c89 100644
--- a/Lib/fontTools/ttLib/ttGlyphSet.py
+++ b/Lib/fontTools/ttLib/ttGlyphSet.py
@@ -1,221 +1,318 @@
"""GlyphSets returned by a TTFont."""
-from fontTools.misc.fixedTools import otRound
+from abc import ABC, abstractmethod
+from collections.abc import Mapping
+from contextlib import contextmanager
from copy import copy
+from types import SimpleNamespace
+from fontTools.misc.fixedTools import otRound
+from fontTools.misc.loggingTools import deprecateFunction
+from fontTools.misc.transform import Transform
+from fontTools.pens.transformPen import TransformPen, TransformPointPen
+
+
+class _TTGlyphSet(Mapping):
+
+ """Generic dict-like GlyphSet class that pulls metrics from hmtx and
+ glyph shape from TrueType or CFF.
+ """
+
+ def __init__(self, font, location, glyphsMapping):
+ self.font = font
+ self.defaultLocationNormalized = (
+ {axis.axisTag: 0 for axis in self.font["fvar"].axes}
+ if "fvar" in self.font
+ else {}
+ )
+ self.location = location if location is not None else {}
+ self.rawLocation = {} # VarComponent-only location
+ self.originalLocation = location if location is not None else {}
+ self.depth = 0
+ self.locationStack = []
+ self.rawLocationStack = []
+ self.glyphsMapping = glyphsMapping
+ self.hMetrics = font["hmtx"].metrics
+ self.vMetrics = getattr(font.get("vmtx"), "metrics", None)
+ self.hvarTable = None
+ if location:
+ from fontTools.varLib.varStore import VarStoreInstancer
+
+ self.hvarTable = getattr(font.get("HVAR"), "table", None)
+ if self.hvarTable is not None:
+ self.hvarInstancer = VarStoreInstancer(
+ self.hvarTable.VarStore, font["fvar"].axes, location
+ )
+ # TODO VVAR, VORG
+
+ @contextmanager
+ def pushLocation(self, location, reset: bool):
+ self.locationStack.append(self.location)
+ self.rawLocationStack.append(self.rawLocation)
+ if reset:
+ self.location = self.originalLocation.copy()
+ self.rawLocation = self.defaultLocationNormalized.copy()
+ else:
+ self.location = self.location.copy()
+ self.rawLocation = {}
+ self.location.update(location)
+ self.rawLocation.update(location)
+
+ try:
+ yield None
+ finally:
+ self.location = self.locationStack.pop()
+ self.rawLocation = self.rawLocationStack.pop()
+
+ @contextmanager
+ def pushDepth(self):
+ try:
+ depth = self.depth
+ self.depth += 1
+ yield depth
+ finally:
+ self.depth -= 1
+
+ def __contains__(self, glyphName):
+ return glyphName in self.glyphsMapping
+
+ def __iter__(self):
+ return iter(self.glyphsMapping.keys())
+
+ def __len__(self):
+ return len(self.glyphsMapping)
+
+ @deprecateFunction(
+ "use 'glyphName in glyphSet' instead", category=DeprecationWarning
+ )
+ def has_key(self, glyphName):
+ return glyphName in self.glyphsMapping
+
+
+class _TTGlyphSetGlyf(_TTGlyphSet):
+ def __init__(self, font, location):
+ self.glyfTable = font["glyf"]
+ super().__init__(font, location, self.glyfTable)
+ self.gvarTable = font.get("gvar")
+
+ def __getitem__(self, glyphName):
+ return _TTGlyphGlyf(self, glyphName)
+
+
+class _TTGlyphSetCFF(_TTGlyphSet):
+ def __init__(self, font, location):
+ tableTag = "CFF2" if "CFF2" in font else "CFF "
+ self.charStrings = list(font[tableTag].cff.values())[0].CharStrings
+ super().__init__(font, location, self.charStrings)
+ self.blender = None
+ if location:
+ from fontTools.varLib.varStore import VarStoreInstancer
+
+ varStore = getattr(self.charStrings, "varStore", None)
+ if varStore is not None:
+ instancer = VarStoreInstancer(
+ varStore.otVarStore, font["fvar"].axes, location
+ )
+ self.blender = instancer.interpolateFromDeltas
+
+ def __getitem__(self, glyphName):
+ return _TTGlyphCFF(self, glyphName)
+
+
+class _TTGlyph(ABC):
+
+ """Glyph object that supports the Pen protocol, meaning that it has
+ .draw() and .drawPoints() methods that take a pen object as their only
+ argument. Additionally there are 'width' and 'lsb' attributes, read from
+ the 'hmtx' table.
+
+ If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'
+ attributes.
+ """
+
+ def __init__(self, glyphSet, glyphName):
+ self.glyphSet = glyphSet
+ self.name = glyphName
+ self.width, self.lsb = glyphSet.hMetrics[glyphName]
+ if glyphSet.vMetrics is not None:
+ self.height, self.tsb = glyphSet.vMetrics[glyphName]
+ else:
+ self.height, self.tsb = None, None
+ if glyphSet.location and glyphSet.hvarTable is not None:
+ varidx = (
+ glyphSet.font.getGlyphID(glyphName)
+ if glyphSet.hvarTable.AdvWidthMap is None
+ else glyphSet.hvarTable.AdvWidthMap.mapping[glyphName]
+ )
+ self.width += glyphSet.hvarInstancer[varidx]
+ # TODO: VVAR/VORG
+
+ @abstractmethod
+ def draw(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
+ how that works.
+ """
+ raise NotImplementedError
+
+ def drawPoints(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
+ how that works.
+ """
+ from fontTools.pens.pointPen import SegmentToPointPen
+
+ self.draw(SegmentToPointPen(pen))
-class _TTGlyphSet(object):
-
- """Generic dict-like GlyphSet class that pulls metrics from hmtx and
- glyph shape from TrueType or CFF.
- """
-
- def __init__(self, ttFont, glyphs, glyphType):
- """Construct a new glyphset.
-
- Args:
- font (TTFont): The font object (used to get metrics).
- glyphs (dict): A dictionary mapping glyph names to ``_TTGlyph`` objects.
- glyphType (class): Either ``_TTGlyphCFF`` or ``_TTGlyphGlyf``.
- """
- self._glyphs = glyphs
- self._hmtx = ttFont['hmtx']
- self._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None
- self._glyphType = glyphType
-
- def keys(self):
- return list(self._glyphs.keys())
-
- def has_key(self, glyphName):
- return glyphName in self._glyphs
-
- __contains__ = has_key
-
- def __getitem__(self, glyphName):
- horizontalMetrics = self._hmtx[glyphName]
- verticalMetrics = self._vmtx[glyphName] if self._vmtx else None
- return self._glyphType(
- self, self._glyphs[glyphName], horizontalMetrics, verticalMetrics)
-
- def __len__(self):
- return len(self._glyphs)
-
- def get(self, glyphName, default=None):
- try:
- return self[glyphName]
- except KeyError:
- return default
-
-class _TTGlyph(object):
-
- """Wrapper for a TrueType glyph that supports the Pen protocol, meaning
- that it has .draw() and .drawPoints() methods that take a pen object as
- their only argument. Additionally there are 'width' and 'lsb' attributes,
- read from the 'hmtx' table.
-
- If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'
- attributes.
- """
-
- def __init__(self, glyphset, glyph, horizontalMetrics, verticalMetrics=None):
- """Construct a new _TTGlyph.
-
- Args:
- glyphset (_TTGlyphSet): A glyphset object used to resolve components.
- glyph (ttLib.tables._g_l_y_f.Glyph): The glyph object.
- horizontalMetrics (int, int): The glyph's width and left sidebearing.
- """
- self._glyphset = glyphset
- self._glyph = glyph
- self.width, self.lsb = horizontalMetrics
- if verticalMetrics:
- self.height, self.tsb = verticalMetrics
- else:
- self.height, self.tsb = None, None
-
- def draw(self, pen):
- """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
- how that works.
- """
- self._glyph.draw(pen)
-
- def drawPoints(self, pen):
- from fontTools.pens.pointPen import SegmentToPointPen
- self.draw(SegmentToPointPen(pen))
-
-class _TTGlyphCFF(_TTGlyph):
- pass
class _TTGlyphGlyf(_TTGlyph):
+ def draw(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
+ how that works.
+ """
+ glyph, offset = self._getGlyphAndOffset()
+
+ with self.glyphSet.pushDepth() as depth:
+ if depth:
+ offset = 0 # Offset should only apply at top-level
+
+ if glyph.isVarComposite():
+ self._drawVarComposite(glyph, pen, False)
+ return
+
+ glyph.draw(pen, self.glyphSet.glyfTable, offset)
+
+ def drawPoints(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
+ how that works.
+ """
+ glyph, offset = self._getGlyphAndOffset()
+
+ with self.glyphSet.pushDepth() as depth:
+ if depth:
+ offset = 0 # Offset should only apply at top-level
+
+ if glyph.isVarComposite():
+ self._drawVarComposite(glyph, pen, True)
+ return
+
+ glyph.drawPoints(pen, self.glyphSet.glyfTable, offset)
+
+ def _drawVarComposite(self, glyph, pen, isPointPen):
+ from fontTools.ttLib.tables._g_l_y_f import (
+ VarComponentFlags,
+ VAR_COMPONENT_TRANSFORM_MAPPING,
+ )
+
+ for comp in glyph.components:
+ with self.glyphSet.pushLocation(
+ comp.location, comp.flags & VarComponentFlags.RESET_UNSPECIFIED_AXES
+ ):
+ try:
+ pen.addVarComponent(
+ comp.glyphName, comp.transform, self.glyphSet.rawLocation
+ )
+ except AttributeError:
+ t = comp.transform.toTransform()
+ if isPointPen:
+ tPen = TransformPointPen(pen, t)
+ self.glyphSet[comp.glyphName].drawPoints(tPen)
+ else:
+ tPen = TransformPen(pen, t)
+ self.glyphSet[comp.glyphName].draw(tPen)
+
+ def _getGlyphAndOffset(self):
+ if self.glyphSet.location and self.glyphSet.gvarTable is not None:
+ glyph = self._getGlyphInstance()
+ else:
+ glyph = self.glyphSet.glyfTable[self.name]
+
+ offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
+ return glyph, offset
+
+ def _getGlyphInstance(self):
+ from fontTools.varLib.iup import iup_delta
+ from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
+ from fontTools.varLib.models import supportScalar
+
+ glyphSet = self.glyphSet
+ glyfTable = glyphSet.glyfTable
+ variations = glyphSet.gvarTable.variations[self.name]
+ hMetrics = glyphSet.hMetrics
+ vMetrics = glyphSet.vMetrics
+ coordinates, _ = glyfTable._getCoordinatesAndControls(
+ self.name, hMetrics, vMetrics
+ )
+ origCoords, endPts = None, None
+ for var in variations:
+ scalar = supportScalar(glyphSet.location, var.axes)
+ if not scalar:
+ continue
+ delta = var.coordinates
+ if None in delta:
+ if origCoords is None:
+ origCoords, control = glyfTable._getCoordinatesAndControls(
+ self.name, hMetrics, vMetrics
+ )
+ endPts = (
+ control[1] if control[0] >= 1 else list(range(len(control[1])))
+ )
+ delta = iup_delta(delta, origCoords, endPts)
+ coordinates += GlyphCoordinates(delta) * scalar
+
+ glyph = copy(glyfTable[self.name]) # Shallow copy
+ width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyfTable)
+ self.lsb = lsb
+ self.tsb = tsb
+ if glyphSet.hvarTable is None:
+ # no HVAR: let's set metrics from the phantom points
+ self.width = width
+ self.height = height
+ return glyph
+
- def draw(self, pen):
- """Draw the glyph onto Pen. See fontTools.pens.basePen for details
- how that works.
- """
- glyfTable = self._glyphset._glyphs
- glyph = self._glyph
- offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
- glyph.draw(pen, glyfTable, offset)
-
- def drawPoints(self, pen):
- """Draw the glyph onto PointPen. See fontTools.pens.pointPen
- for details how that works.
- """
- glyfTable = self._glyphset._glyphs
- glyph = self._glyph
- offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
- glyph.drawPoints(pen, glyfTable, offset)
-
-
-
-class _TTVarGlyphSet(_TTGlyphSet):
-
- def __init__(self, font, location, normalized=False):
- self._ttFont = font
- self._glyphs = font['glyf']
-
- if not normalized:
- from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap
-
- axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in font['fvar'].axes}
- location = normalizeLocation(location, axes)
- if 'avar' in font:
- avar = font['avar']
- avarSegments = avar.segments
- new_location = {}
- for axis_tag, value in location.items():
- avarMapping = avarSegments.get(axis_tag, None)
- if avarMapping is not None:
- value = piecewiseLinearMap(value, avarMapping)
- new_location[axis_tag] = value
- location = new_location
- del new_location
-
- self.location = location
-
- def __getitem__(self, glyphName):
- if glyphName not in self._glyphs:
- raise KeyError(glyphName)
- return _TTVarGlyphGlyf(self._ttFont, glyphName, self.location)
+class _TTGlyphCFF(_TTGlyph):
+ def draw(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
+ how that works.
+ """
+ self.glyphSet.charStrings[self.name].draw(pen, self.glyphSet.blender)
def _setCoordinates(glyph, coord, glyfTable):
- # Handle phantom points for (left, right, top, bottom) positions.
- assert len(coord) >= 4
- if not hasattr(glyph, 'xMin'):
- glyph.recalcBounds(glyfTable)
- leftSideX = coord[-4][0]
- rightSideX = coord[-3][0]
- topSideY = coord[-2][1]
- bottomSideY = coord[-1][1]
-
- for _ in range(4):
- del coord[-1]
-
- if glyph.isComposite():
- assert len(coord) == len(glyph.components)
- for p,comp in zip(coord, glyph.components):
- if hasattr(comp, 'x'):
- comp.x,comp.y = p
- elif glyph.numberOfContours == 0:
- assert len(coord) == 0
- else:
- assert len(coord) == len(glyph.coordinates)
- glyph.coordinates = coord
-
- glyph.recalcBounds(glyfTable)
-
- horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
- verticalAdvanceWidth = otRound(topSideY - bottomSideY)
- leftSideBearing = otRound(glyph.xMin - leftSideX)
- topSideBearing = otRound(topSideY - glyph.yMax)
- return (
- horizontalAdvanceWidth,
- leftSideBearing,
- verticalAdvanceWidth,
- topSideBearing,
- )
-
-
-class _TTVarGlyph(_TTGlyph):
- def __init__(self, ttFont, glyphName, location):
- self._ttFont = ttFont
- self._glyphName = glyphName
- self._location = location
- # draw() fills these in
- self.width = self.height = self.lsb = self.tsb = None
-
-
-class _TTVarGlyphGlyf(_TTVarGlyph):
-
- def draw(self, pen):
- from fontTools.varLib.iup import iup_delta
- from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
- from fontTools.varLib.models import supportScalar
-
- glyf = self._ttFont['glyf']
- hMetrics = self._ttFont['hmtx'].metrics
- vMetrics = getattr(self._ttFont.get('vmtx'), 'metrics', None)
-
- variations = self._ttFont['gvar'].variations[self._glyphName]
- coordinates, _ = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics)
- origCoords, endPts = None, None
- for var in variations:
- scalar = supportScalar(self._location, var.axes)
- if not scalar:
- continue
- delta = var.coordinates
- if None in delta:
- if origCoords is None:
- origCoords,control = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics)
- endPts = control[1] if control[0] >= 1 else list(range(len(control[1])))
- delta = iup_delta(delta, origCoords, endPts)
- coordinates += GlyphCoordinates(delta) * scalar
-
- glyph = copy(glyf[self._glyphName]) # Shallow copy
- width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyf)
- self.width = width
- self.lsb = lsb
- self.height = height
- self.tsb = tsb
- offset = lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
- glyph.draw(pen, glyf, offset)
+ # Handle phantom points for (left, right, top, bottom) positions.
+ assert len(coord) >= 4
+ leftSideX = coord[-4][0]
+ rightSideX = coord[-3][0]
+ topSideY = coord[-2][1]
+ bottomSideY = coord[-1][1]
+
+ for _ in range(4):
+ del coord[-1]
+
+ if glyph.isComposite():
+ assert len(coord) == len(glyph.components)
+ glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy
+ for p, comp in zip(coord, glyph.components):
+ if hasattr(comp, "x"):
+ comp.x, comp.y = p
+ elif glyph.isVarComposite():
+ glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy
+ for comp in glyph.components:
+ coord = comp.setCoordinates(coord)
+ assert not coord
+ elif glyph.numberOfContours == 0:
+ assert len(coord) == 0
+ else:
+ assert len(coord) == len(glyph.coordinates)
+ glyph.coordinates = coord
+
+ glyph.recalcBounds(glyfTable)
+
+ horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
+ verticalAdvanceWidth = otRound(topSideY - bottomSideY)
+ leftSideBearing = otRound(glyph.xMin - leftSideX)
+ topSideBearing = otRound(topSideY - glyph.yMax)
+ return (
+ horizontalAdvanceWidth,
+ leftSideBearing,
+ verticalAdvanceWidth,
+ topSideBearing,
+ )
diff --git a/Lib/fontTools/ttLib/woff2.py b/Lib/fontTools/ttLib/woff2.py
index b66661ab..9da2f7e6 100644
--- a/Lib/fontTools/ttLib/woff2.py
+++ b/Lib/fontTools/ttLib/woff2.py
@@ -6,11 +6,24 @@ from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
-from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass,
- getSearchRange)
-from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry,
- WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry,
- sfntDirectoryEntrySize, calcChecksum)
+from fontTools.ttLib import (
+ TTFont,
+ TTLibError,
+ getTableModule,
+ getTableClass,
+ getSearchRange,
+)
+from fontTools.ttLib.sfnt import (
+ SFNTReader,
+ SFNTWriter,
+ DirectoryEntry,
+ WOFFFlavorData,
+ sfntDirectoryFormat,
+ sfntDirectorySize,
+ SFNTDirectoryEntry,
+ sfntDirectoryEntrySize,
+ calcChecksum,
+)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
@@ -19,454 +32,473 @@ log = logging.getLogger("fontTools.ttLib.woff2")
haveBrotli = False
try:
- try:
- import brotlicffi as brotli
- except ImportError:
- import brotli
- haveBrotli = True
+ try:
+ import brotlicffi as brotli
+ except ImportError:
+ import brotli
+ haveBrotli = True
except ImportError:
- pass
+ pass
class WOFF2Reader(SFNTReader):
-
- flavor = "woff2"
-
- def __init__(self, file, checkChecksums=0, fontNumber=-1):
- if not haveBrotli:
- log.error(
- 'The WOFF2 decoder requires the Brotli Python extension, available at: '
- 'https://github.com/google/brotli')
- raise ImportError("No module named brotli")
-
- self.file = file
-
- signature = Tag(self.file.read(4))
- if signature != b"wOF2":
- raise TTLibError("Not a WOFF2 font (bad signature)")
-
- self.file.seek(0)
- self.DirectoryEntry = WOFF2DirectoryEntry
- data = self.file.read(woff2DirectorySize)
- if len(data) != woff2DirectorySize:
- raise TTLibError('Not a WOFF2 font (not enough data)')
- sstruct.unpack(woff2DirectoryFormat, data, self)
-
- self.tables = OrderedDict()
- offset = 0
- for i in range(self.numTables):
- entry = self.DirectoryEntry()
- entry.fromFile(self.file)
- tag = Tag(entry.tag)
- self.tables[tag] = entry
- entry.offset = offset
- offset += entry.length
-
- totalUncompressedSize = offset
- compressedData = self.file.read(self.totalCompressedSize)
- decompressedData = brotli.decompress(compressedData)
- if len(decompressedData) != totalUncompressedSize:
- raise TTLibError(
- 'unexpected size for decompressed font data: expected %d, found %d'
- % (totalUncompressedSize, len(decompressedData)))
- self.transformBuffer = BytesIO(decompressedData)
-
- self.file.seek(0, 2)
- if self.length != self.file.tell():
- raise TTLibError("reported 'length' doesn't match the actual file size")
-
- self.flavorData = WOFF2FlavorData(self)
-
- # make empty TTFont to store data while reconstructing tables
- self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
-
- def __getitem__(self, tag):
- """Fetch the raw table data. Reconstruct transformed tables."""
- entry = self.tables[Tag(tag)]
- if not hasattr(entry, 'data'):
- if entry.transformed:
- entry.data = self.reconstructTable(tag)
- else:
- entry.data = entry.loadData(self.transformBuffer)
- return entry.data
-
- def reconstructTable(self, tag):
- """Reconstruct table named 'tag' from transformed data."""
- entry = self.tables[Tag(tag)]
- rawData = entry.loadData(self.transformBuffer)
- if tag == 'glyf':
- # no need to pad glyph data when reconstructing
- padding = self.padding if hasattr(self, 'padding') else None
- data = self._reconstructGlyf(rawData, padding)
- elif tag == 'loca':
- data = self._reconstructLoca()
- elif tag == 'hmtx':
- data = self._reconstructHmtx(rawData)
- else:
- raise TTLibError("transform for table '%s' is unknown" % tag)
- return data
-
- def _reconstructGlyf(self, data, padding=None):
- """ Return recostructed glyf table data, and set the corresponding loca's
- locations. Optionally pad glyph offsets to the specified number of bytes.
- """
- self.ttFont['loca'] = WOFF2LocaTable()
- glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable()
- glyfTable.reconstruct(data, self.ttFont)
- if padding:
- glyfTable.padding = padding
- data = glyfTable.compile(self.ttFont)
- return data
-
- def _reconstructLoca(self):
- """ Return reconstructed loca table data. """
- if 'loca' not in self.ttFont:
- # make sure glyf is reconstructed first
- self.tables['glyf'].data = self.reconstructTable('glyf')
- locaTable = self.ttFont['loca']
- data = locaTable.compile(self.ttFont)
- if len(data) != self.tables['loca'].origLength:
- raise TTLibError(
- "reconstructed 'loca' table doesn't match original size: "
- "expected %d, found %d"
- % (self.tables['loca'].origLength, len(data)))
- return data
-
- def _reconstructHmtx(self, data):
- """ Return reconstructed hmtx table data. """
- # Before reconstructing 'hmtx' table we need to parse other tables:
- # 'glyf' is required for reconstructing the sidebearings from the glyphs'
- # bounding box; 'hhea' is needed for the numberOfHMetrics field.
- if "glyf" in self.flavorData.transformedTables:
- # transformed 'glyf' table is self-contained, thus 'loca' not needed
- tableDependencies = ("maxp", "hhea", "glyf")
- else:
- # decompiling untransformed 'glyf' requires 'loca', which requires 'head'
- tableDependencies = ("maxp", "head", "hhea", "loca", "glyf")
- for tag in tableDependencies:
- self._decompileTable(tag)
- hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable()
- hmtxTable.reconstruct(data, self.ttFont)
- data = hmtxTable.compile(self.ttFont)
- return data
-
- def _decompileTable(self, tag):
- """Decompile table data and store it inside self.ttFont."""
- data = self[tag]
- if self.ttFont.isLoaded(tag):
- return self.ttFont[tag]
- tableClass = getTableClass(tag)
- table = tableClass(tag)
- self.ttFont.tables[tag] = table
- table.decompile(data, self.ttFont)
+ flavor = "woff2"
+
+ def __init__(self, file, checkChecksums=0, fontNumber=-1):
+ if not haveBrotli:
+ log.error(
+ "The WOFF2 decoder requires the Brotli Python extension, available at: "
+ "https://github.com/google/brotli"
+ )
+ raise ImportError("No module named brotli")
+
+ self.file = file
+
+ signature = Tag(self.file.read(4))
+ if signature != b"wOF2":
+ raise TTLibError("Not a WOFF2 font (bad signature)")
+
+ self.file.seek(0)
+ self.DirectoryEntry = WOFF2DirectoryEntry
+ data = self.file.read(woff2DirectorySize)
+ if len(data) != woff2DirectorySize:
+ raise TTLibError("Not a WOFF2 font (not enough data)")
+ sstruct.unpack(woff2DirectoryFormat, data, self)
+
+ self.tables = OrderedDict()
+ offset = 0
+ for i in range(self.numTables):
+ entry = self.DirectoryEntry()
+ entry.fromFile(self.file)
+ tag = Tag(entry.tag)
+ self.tables[tag] = entry
+ entry.offset = offset
+ offset += entry.length
+
+ totalUncompressedSize = offset
+ compressedData = self.file.read(self.totalCompressedSize)
+ decompressedData = brotli.decompress(compressedData)
+ if len(decompressedData) != totalUncompressedSize:
+ raise TTLibError(
+ "unexpected size for decompressed font data: expected %d, found %d"
+ % (totalUncompressedSize, len(decompressedData))
+ )
+ self.transformBuffer = BytesIO(decompressedData)
+
+ self.file.seek(0, 2)
+ if self.length != self.file.tell():
+ raise TTLibError("reported 'length' doesn't match the actual file size")
+
+ self.flavorData = WOFF2FlavorData(self)
+
+ # make empty TTFont to store data while reconstructing tables
+ self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
+
+ def __getitem__(self, tag):
+ """Fetch the raw table data. Reconstruct transformed tables."""
+ entry = self.tables[Tag(tag)]
+ if not hasattr(entry, "data"):
+ if entry.transformed:
+ entry.data = self.reconstructTable(tag)
+ else:
+ entry.data = entry.loadData(self.transformBuffer)
+ return entry.data
+
+ def reconstructTable(self, tag):
+ """Reconstruct table named 'tag' from transformed data."""
+ entry = self.tables[Tag(tag)]
+ rawData = entry.loadData(self.transformBuffer)
+ if tag == "glyf":
+ # no need to pad glyph data when reconstructing
+ padding = self.padding if hasattr(self, "padding") else None
+ data = self._reconstructGlyf(rawData, padding)
+ elif tag == "loca":
+ data = self._reconstructLoca()
+ elif tag == "hmtx":
+ data = self._reconstructHmtx(rawData)
+ else:
+ raise TTLibError("transform for table '%s' is unknown" % tag)
+ return data
+
+ def _reconstructGlyf(self, data, padding=None):
+        """Return reconstructed glyf table data, and set the corresponding loca's
+ locations. Optionally pad glyph offsets to the specified number of bytes.
+ """
+ self.ttFont["loca"] = WOFF2LocaTable()
+ glyfTable = self.ttFont["glyf"] = WOFF2GlyfTable()
+ glyfTable.reconstruct(data, self.ttFont)
+ if padding:
+ glyfTable.padding = padding
+ data = glyfTable.compile(self.ttFont)
+ return data
+
+ def _reconstructLoca(self):
+ """Return reconstructed loca table data."""
+ if "loca" not in self.ttFont:
+ # make sure glyf is reconstructed first
+ self.tables["glyf"].data = self.reconstructTable("glyf")
+ locaTable = self.ttFont["loca"]
+ data = locaTable.compile(self.ttFont)
+ if len(data) != self.tables["loca"].origLength:
+ raise TTLibError(
+ "reconstructed 'loca' table doesn't match original size: "
+ "expected %d, found %d" % (self.tables["loca"].origLength, len(data))
+ )
+ return data
+
+ def _reconstructHmtx(self, data):
+ """Return reconstructed hmtx table data."""
+ # Before reconstructing 'hmtx' table we need to parse other tables:
+ # 'glyf' is required for reconstructing the sidebearings from the glyphs'
+ # bounding box; 'hhea' is needed for the numberOfHMetrics field.
+ if "glyf" in self.flavorData.transformedTables:
+ # transformed 'glyf' table is self-contained, thus 'loca' not needed
+ tableDependencies = ("maxp", "hhea", "glyf")
+ else:
+ # decompiling untransformed 'glyf' requires 'loca', which requires 'head'
+ tableDependencies = ("maxp", "head", "hhea", "loca", "glyf")
+ for tag in tableDependencies:
+ self._decompileTable(tag)
+ hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable()
+ hmtxTable.reconstruct(data, self.ttFont)
+ data = hmtxTable.compile(self.ttFont)
+ return data
+
+ def _decompileTable(self, tag):
+ """Decompile table data and store it inside self.ttFont."""
+ data = self[tag]
+ if self.ttFont.isLoaded(tag):
+ return self.ttFont[tag]
+ tableClass = getTableClass(tag)
+ table = tableClass(tag)
+ self.ttFont.tables[tag] = table
+ table.decompile(data, self.ttFont)
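
For orientation, the reader above is normally driven through fontTools' TTFont front end rather than used directly. A minimal usage sketch (the file names are placeholders, and the brotli package must be installed):

    from fontTools.ttLib import TTFont

    # Opening a .woff2 goes through WOFF2Reader; transformed tables such as
    # 'glyf', 'loca' and 'hmtx' are only reconstructed when first accessed.
    font = TTFont("MyFont.woff2")
    assert font.flavor == "woff2"

    # Clearing the flavor and saving writes a plain TrueType/OpenType binary.
    font.flavor = None
    font.save("MyFont.ttf")
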
class WOFF2Writer(SFNTWriter):
-
- flavor = "woff2"
-
- def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
- flavor=None, flavorData=None):
- if not haveBrotli:
- log.error(
- 'The WOFF2 encoder requires the Brotli Python extension, available at: '
- 'https://github.com/google/brotli')
- raise ImportError("No module named brotli")
-
- self.file = file
- self.numTables = numTables
- self.sfntVersion = Tag(sfntVersion)
- self.flavorData = WOFF2FlavorData(data=flavorData)
-
- self.directoryFormat = woff2DirectoryFormat
- self.directorySize = woff2DirectorySize
- self.DirectoryEntry = WOFF2DirectoryEntry
-
- self.signature = Tag("wOF2")
-
- self.nextTableOffset = 0
- self.transformBuffer = BytesIO()
-
- self.tables = OrderedDict()
-
- # make empty TTFont to store data while normalising and transforming tables
- self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
-
- def __setitem__(self, tag, data):
- """Associate new entry named 'tag' with raw table data."""
- if tag in self.tables:
- raise TTLibError("cannot rewrite '%s' table" % tag)
- if tag == 'DSIG':
- # always drop DSIG table, since the encoding process can invalidate it
- self.numTables -= 1
- return
-
- entry = self.DirectoryEntry()
- entry.tag = Tag(tag)
- entry.flags = getKnownTagIndex(entry.tag)
- # WOFF2 table data are written to disk only on close(), after all tags
- # have been specified
- entry.data = data
-
- self.tables[tag] = entry
-
- def close(self):
- """ All tags must have been specified. Now write the table data and directory.
- """
- if len(self.tables) != self.numTables:
- raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables)))
-
- if self.sfntVersion in ("\x00\x01\x00\x00", "true"):
- isTrueType = True
- elif self.sfntVersion == "OTTO":
- isTrueType = False
- else:
- raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
-
- # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned.
- # However, the reference WOFF2 implementation still fails to reconstruct
- # 'unpadded' glyf tables, therefore we need to 'normalise' them.
- # See:
- # https://github.com/khaledhosny/ots/issues/60
- # https://github.com/google/woff2/issues/15
- if (
- isTrueType
- and "glyf" in self.flavorData.transformedTables
- and "glyf" in self.tables
- ):
- self._normaliseGlyfAndLoca(padding=4)
- self._setHeadTransformFlag()
-
- # To pass the legacy OpenType Sanitiser currently included in browsers,
- # we must sort the table directory and data alphabetically by tag.
- # See:
- # https://github.com/google/woff2/pull/3
- # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html
- # TODO(user): remove to match spec once browsers are on newer OTS
- self.tables = OrderedDict(sorted(self.tables.items()))
-
- self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets()
-
- fontData = self._transformTables()
- compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT)
-
- self.totalCompressedSize = len(compressedFont)
- self.length = self._calcTotalSize()
- self.majorVersion, self.minorVersion = self._getVersion()
- self.reserved = 0
-
- directory = self._packTableDirectory()
- self.file.seek(0)
- self.file.write(pad(directory + compressedFont, size=4))
- self._writeFlavorData()
-
- def _normaliseGlyfAndLoca(self, padding=4):
- """ Recompile glyf and loca tables, aligning glyph offsets to multiples of
- 'padding' size. Update the head table's 'indexToLocFormat' accordingly while
- compiling loca.
- """
- if self.sfntVersion == "OTTO":
- return
-
- for tag in ('maxp', 'head', 'loca', 'glyf'):
- self._decompileTable(tag)
- self.ttFont['glyf'].padding = padding
- for tag in ('glyf', 'loca'):
- self._compileTable(tag)
-
- def _setHeadTransformFlag(self):
- """ Set bit 11 of 'head' table flags to indicate that the font has undergone
- a lossless modifying transform. Re-compile head table data."""
- self._decompileTable('head')
- self.ttFont['head'].flags |= (1 << 11)
- self._compileTable('head')
-
- def _decompileTable(self, tag):
- """ Fetch table data, decompile it, and store it inside self.ttFont. """
- tag = Tag(tag)
- if tag not in self.tables:
- raise TTLibError("missing required table: %s" % tag)
- if self.ttFont.isLoaded(tag):
- return
- data = self.tables[tag].data
- if tag == 'loca':
- tableClass = WOFF2LocaTable
- elif tag == 'glyf':
- tableClass = WOFF2GlyfTable
- elif tag == 'hmtx':
- tableClass = WOFF2HmtxTable
- else:
- tableClass = getTableClass(tag)
- table = tableClass(tag)
- self.ttFont.tables[tag] = table
- table.decompile(data, self.ttFont)
-
- def _compileTable(self, tag):
- """ Compile table and store it in its 'data' attribute. """
- self.tables[tag].data = self.ttFont[tag].compile(self.ttFont)
-
- def _calcSFNTChecksumsLengthsAndOffsets(self):
- """ Compute the 'original' SFNT checksums, lengths and offsets for checksum
- adjustment calculation. Return the total size of the uncompressed font.
- """
- offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables)
- for tag, entry in self.tables.items():
- data = entry.data
- entry.origOffset = offset
- entry.origLength = len(data)
- if tag == 'head':
- entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
- else:
- entry.checkSum = calcChecksum(data)
- offset += (entry.origLength + 3) & ~3
- return offset
-
- def _transformTables(self):
- """Return transformed font data."""
- transformedTables = self.flavorData.transformedTables
- for tag, entry in self.tables.items():
- data = None
- if tag in transformedTables:
- data = self.transformTable(tag)
- if data is not None:
- entry.transformed = True
- if data is None:
- # pass-through the table data without transformation
- data = entry.data
- entry.transformed = False
- entry.offset = self.nextTableOffset
- entry.saveData(self.transformBuffer, data)
- self.nextTableOffset += entry.length
- self.writeMasterChecksum()
- fontData = self.transformBuffer.getvalue()
- return fontData
-
- def transformTable(self, tag):
- """Return transformed table data, or None if some pre-conditions aren't
- met -- in which case, the non-transformed table data will be used.
- """
- if tag == "loca":
- data = b""
- elif tag == "glyf":
- for tag in ('maxp', 'head', 'loca', 'glyf'):
- self._decompileTable(tag)
- glyfTable = self.ttFont['glyf']
- data = glyfTable.transform(self.ttFont)
- elif tag == "hmtx":
- if "glyf" not in self.tables:
- return
- for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"):
- self._decompileTable(tag)
- hmtxTable = self.ttFont["hmtx"]
- data = hmtxTable.transform(self.ttFont) # can be None
- else:
- raise TTLibError("Transform for table '%s' is unknown" % tag)
- return data
-
- def _calcMasterChecksum(self):
- """Calculate checkSumAdjustment."""
- tags = list(self.tables.keys())
- checksums = []
- for i in range(len(tags)):
- checksums.append(self.tables[tags[i]].checkSum)
-
- # Create a SFNT directory for checksum calculation purposes
- self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16)
- directory = sstruct.pack(sfntDirectoryFormat, self)
- tables = sorted(self.tables.items())
- for tag, entry in tables:
- sfntEntry = SFNTDirectoryEntry()
- sfntEntry.tag = entry.tag
- sfntEntry.checkSum = entry.checkSum
- sfntEntry.offset = entry.origOffset
- sfntEntry.length = entry.origLength
- directory = directory + sfntEntry.toString()
-
- directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
- assert directory_end == len(directory)
-
- checksums.append(calcChecksum(directory))
- checksum = sum(checksums) & 0xffffffff
- # BiboAfba!
- checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
- return checksumadjustment
-
- def writeMasterChecksum(self):
- """Write checkSumAdjustment to the transformBuffer."""
- checksumadjustment = self._calcMasterChecksum()
- self.transformBuffer.seek(self.tables['head'].offset + 8)
- self.transformBuffer.write(struct.pack(">L", checksumadjustment))
-
- def _calcTotalSize(self):
- """Calculate total size of WOFF2 font, including any meta- and/or private data."""
- offset = self.directorySize
- for entry in self.tables.values():
- offset += len(entry.toString())
- offset += self.totalCompressedSize
- offset = (offset + 3) & ~3
- offset = self._calcFlavorDataOffsetsAndSize(offset)
- return offset
-
- def _calcFlavorDataOffsetsAndSize(self, start):
- """Calculate offsets and lengths for any meta- and/or private data."""
- offset = start
- data = self.flavorData
- if data.metaData:
- self.metaOrigLength = len(data.metaData)
- self.metaOffset = offset
- self.compressedMetaData = brotli.compress(
- data.metaData, mode=brotli.MODE_TEXT)
- self.metaLength = len(self.compressedMetaData)
- offset += self.metaLength
- else:
- self.metaOffset = self.metaLength = self.metaOrigLength = 0
- self.compressedMetaData = b""
- if data.privData:
- # make sure private data is padded to 4-byte boundary
- offset = (offset + 3) & ~3
- self.privOffset = offset
- self.privLength = len(data.privData)
- offset += self.privLength
- else:
- self.privOffset = self.privLength = 0
- return offset
-
- def _getVersion(self):
- """Return the WOFF2 font's (majorVersion, minorVersion) tuple."""
- data = self.flavorData
- if data.majorVersion is not None and data.minorVersion is not None:
- return data.majorVersion, data.minorVersion
- else:
- # if None, return 'fontRevision' from 'head' table
- if 'head' in self.tables:
- return struct.unpack(">HH", self.tables['head'].data[4:8])
- else:
- return 0, 0
-
- def _packTableDirectory(self):
- """Return WOFF2 table directory data."""
- directory = sstruct.pack(self.directoryFormat, self)
- for entry in self.tables.values():
- directory = directory + entry.toString()
- return directory
-
- def _writeFlavorData(self):
- """Write metadata and/or private data using appropiate padding."""
- compressedMetaData = self.compressedMetaData
- privData = self.flavorData.privData
- if compressedMetaData and privData:
- compressedMetaData = pad(compressedMetaData, size=4)
- if compressedMetaData:
- self.file.seek(self.metaOffset)
- assert self.file.tell() == self.metaOffset
- self.file.write(compressedMetaData)
- if privData:
- self.file.seek(self.privOffset)
- assert self.file.tell() == self.privOffset
- self.file.write(privData)
-
- def reordersTables(self):
- return True
+ flavor = "woff2"
+
+ def __init__(
+ self,
+ file,
+ numTables,
+ sfntVersion="\000\001\000\000",
+ flavor=None,
+ flavorData=None,
+ ):
+ if not haveBrotli:
+ log.error(
+ "The WOFF2 encoder requires the Brotli Python extension, available at: "
+ "https://github.com/google/brotli"
+ )
+ raise ImportError("No module named brotli")
+
+ self.file = file
+ self.numTables = numTables
+ self.sfntVersion = Tag(sfntVersion)
+ self.flavorData = WOFF2FlavorData(data=flavorData)
+
+ self.directoryFormat = woff2DirectoryFormat
+ self.directorySize = woff2DirectorySize
+ self.DirectoryEntry = WOFF2DirectoryEntry
+
+ self.signature = Tag("wOF2")
+
+ self.nextTableOffset = 0
+ self.transformBuffer = BytesIO()
+
+ self.tables = OrderedDict()
+
+ # make empty TTFont to store data while normalising and transforming tables
+ self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
+
+ def __setitem__(self, tag, data):
+ """Associate new entry named 'tag' with raw table data."""
+ if tag in self.tables:
+ raise TTLibError("cannot rewrite '%s' table" % tag)
+ if tag == "DSIG":
+ # always drop DSIG table, since the encoding process can invalidate it
+ self.numTables -= 1
+ return
+
+ entry = self.DirectoryEntry()
+ entry.tag = Tag(tag)
+ entry.flags = getKnownTagIndex(entry.tag)
+ # WOFF2 table data are written to disk only on close(), after all tags
+ # have been specified
+ entry.data = data
+
+ self.tables[tag] = entry
+
+ def close(self):
+ """All tags must have been specified. Now write the table data and directory."""
+ if len(self.tables) != self.numTables:
+ raise TTLibError(
+ "wrong number of tables; expected %d, found %d"
+ % (self.numTables, len(self.tables))
+ )
+
+ if self.sfntVersion in ("\x00\x01\x00\x00", "true"):
+ isTrueType = True
+ elif self.sfntVersion == "OTTO":
+ isTrueType = False
+ else:
+ raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
+
+ # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned.
+ # However, the reference WOFF2 implementation still fails to reconstruct
+ # 'unpadded' glyf tables, therefore we need to 'normalise' them.
+ # See:
+ # https://github.com/khaledhosny/ots/issues/60
+ # https://github.com/google/woff2/issues/15
+ if (
+ isTrueType
+ and "glyf" in self.flavorData.transformedTables
+ and "glyf" in self.tables
+ ):
+ self._normaliseGlyfAndLoca(padding=4)
+ self._setHeadTransformFlag()
+
+ # To pass the legacy OpenType Sanitiser currently included in browsers,
+ # we must sort the table directory and data alphabetically by tag.
+ # See:
+ # https://github.com/google/woff2/pull/3
+ # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html
+ #
+ # 2023: We rely on this in _transformTables where we expect that
+ # "loca" comes after "glyf" table.
+ self.tables = OrderedDict(sorted(self.tables.items()))
+
+ self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets()
+
+ fontData = self._transformTables()
+ compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT)
+
+ self.totalCompressedSize = len(compressedFont)
+ self.length = self._calcTotalSize()
+ self.majorVersion, self.minorVersion = self._getVersion()
+ self.reserved = 0
+
+ directory = self._packTableDirectory()
+ self.file.seek(0)
+ self.file.write(pad(directory + compressedFont, size=4))
+ self._writeFlavorData()
+
+ def _normaliseGlyfAndLoca(self, padding=4):
+ """Recompile glyf and loca tables, aligning glyph offsets to multiples of
+ 'padding' size. Update the head table's 'indexToLocFormat' accordingly while
+ compiling loca.
+ """
+ if self.sfntVersion == "OTTO":
+ return
+
+ for tag in ("maxp", "head", "loca", "glyf", "fvar"):
+ if tag in self.tables:
+ self._decompileTable(tag)
+ self.ttFont["glyf"].padding = padding
+ for tag in ("glyf", "loca"):
+ self._compileTable(tag)
+
+ def _setHeadTransformFlag(self):
+ """Set bit 11 of 'head' table flags to indicate that the font has undergone
+ a lossless modifying transform. Re-compile head table data."""
+ self._decompileTable("head")
+ self.ttFont["head"].flags |= 1 << 11
+ self._compileTable("head")
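
The flag manipulation above is plain bit arithmetic; a tiny illustrative check (the starting flags value is made up):

    flags = 0x0003            # hypothetical pre-existing head.flags value
    flags |= 1 << 11          # mark the outlines as having gone through a lossless transform
    assert flags == 0x0803    # bit 11 is set, the original bits are untouched
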
+
+ def _decompileTable(self, tag):
+ """Fetch table data, decompile it, and store it inside self.ttFont."""
+ tag = Tag(tag)
+ if tag not in self.tables:
+ raise TTLibError("missing required table: %s" % tag)
+ if self.ttFont.isLoaded(tag):
+ return
+ data = self.tables[tag].data
+ if tag == "loca":
+ tableClass = WOFF2LocaTable
+ elif tag == "glyf":
+ tableClass = WOFF2GlyfTable
+ elif tag == "hmtx":
+ tableClass = WOFF2HmtxTable
+ else:
+ tableClass = getTableClass(tag)
+ table = tableClass(tag)
+ self.ttFont.tables[tag] = table
+ table.decompile(data, self.ttFont)
+
+ def _compileTable(self, tag):
+ """Compile table and store it in its 'data' attribute."""
+ self.tables[tag].data = self.ttFont[tag].compile(self.ttFont)
+
+ def _calcSFNTChecksumsLengthsAndOffsets(self):
+ """Compute the 'original' SFNT checksums, lengths and offsets for checksum
+ adjustment calculation. Return the total size of the uncompressed font.
+ """
+ offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables)
+ for tag, entry in self.tables.items():
+ data = entry.data
+ entry.origOffset = offset
+ entry.origLength = len(data)
+ if tag == "head":
+ entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
+ else:
+ entry.checkSum = calcChecksum(data)
+ offset += (entry.origLength + 3) & ~3
+ return offset
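
The "(n + 3) & ~3" idiom used for the running offset rounds a length up to the next multiple of four, matching the long alignment of tables in an SFNT file; a quick illustrative check:

    for n, expected in [(0, 0), (1, 4), (5, 8), (8, 8), (13, 16)]:
        assert (n + 3) & ~3 == expected
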
+
+ def _transformTables(self):
+ """Return transformed font data."""
+ transformedTables = self.flavorData.transformedTables
+ for tag, entry in self.tables.items():
+ data = None
+ if tag in transformedTables:
+ data = self.transformTable(tag)
+ if data is not None:
+ entry.transformed = True
+ if data is None:
+ if tag == "glyf":
+ # Currently we always sort table tags so
+ # 'loca' comes after 'glyf'.
+ transformedTables.discard("loca")
+ # pass-through the table data without transformation
+ data = entry.data
+ entry.transformed = False
+ entry.offset = self.nextTableOffset
+ entry.saveData(self.transformBuffer, data)
+ self.nextTableOffset += entry.length
+ self.writeMasterChecksum()
+ fontData = self.transformBuffer.getvalue()
+ return fontData
+
+ def transformTable(self, tag):
+ """Return transformed table data, or None if some pre-conditions aren't
+ met -- in which case, the non-transformed table data will be used.
+ """
+ if tag == "loca":
+ data = b""
+ elif tag == "glyf":
+ for tag in ("maxp", "head", "loca", "glyf"):
+ self._decompileTable(tag)
+ glyfTable = self.ttFont["glyf"]
+ data = glyfTable.transform(self.ttFont)
+ elif tag == "hmtx":
+ if "glyf" not in self.tables:
+ return
+ for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"):
+ self._decompileTable(tag)
+ hmtxTable = self.ttFont["hmtx"]
+ data = hmtxTable.transform(self.ttFont) # can be None
+ else:
+ raise TTLibError("Transform for table '%s' is unknown" % tag)
+ return data
+
+ def _calcMasterChecksum(self):
+ """Calculate checkSumAdjustment."""
+ tags = list(self.tables.keys())
+ checksums = []
+ for i in range(len(tags)):
+ checksums.append(self.tables[tags[i]].checkSum)
+
+ # Create a SFNT directory for checksum calculation purposes
+ self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
+ self.numTables, 16
+ )
+ directory = sstruct.pack(sfntDirectoryFormat, self)
+ tables = sorted(self.tables.items())
+ for tag, entry in tables:
+ sfntEntry = SFNTDirectoryEntry()
+ sfntEntry.tag = entry.tag
+ sfntEntry.checkSum = entry.checkSum
+ sfntEntry.offset = entry.origOffset
+ sfntEntry.length = entry.origLength
+ directory = directory + sfntEntry.toString()
+
+ directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
+ assert directory_end == len(directory)
+
+ checksums.append(calcChecksum(directory))
+ checksum = sum(checksums) & 0xFFFFFFFF
+ # BiboAfba!
+ checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
+ return checksumadjustment
+
+ def writeMasterChecksum(self):
+ """Write checkSumAdjustment to the transformBuffer."""
+ checksumadjustment = self._calcMasterChecksum()
+ self.transformBuffer.seek(self.tables["head"].offset + 8)
+ self.transformBuffer.write(struct.pack(">L", checksumadjustment))
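
The 0xB1B0AFBA constant is the magic number the OpenType 'head' specification uses for checkSumAdjustment; the modular arithmetic above can be sanity-checked in isolation (the checksum value below is made up):

    fontChecksum = 0x1234ABCD                              # hypothetical 32-bit font checksum
    adjustment = (0xB1B0AFBA - fontChecksum) & 0xFFFFFFFF
    # Storing this adjustment makes the font sum back to the magic value modulo 2**32.
    assert (fontChecksum + adjustment) & 0xFFFFFFFF == 0xB1B0AFBA
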
+
+ def _calcTotalSize(self):
+ """Calculate total size of WOFF2 font, including any meta- and/or private data."""
+ offset = self.directorySize
+ for entry in self.tables.values():
+ offset += len(entry.toString())
+ offset += self.totalCompressedSize
+ offset = (offset + 3) & ~3
+ offset = self._calcFlavorDataOffsetsAndSize(offset)
+ return offset
+
+ def _calcFlavorDataOffsetsAndSize(self, start):
+ """Calculate offsets and lengths for any meta- and/or private data."""
+ offset = start
+ data = self.flavorData
+ if data.metaData:
+ self.metaOrigLength = len(data.metaData)
+ self.metaOffset = offset
+ self.compressedMetaData = brotli.compress(
+ data.metaData, mode=brotli.MODE_TEXT
+ )
+ self.metaLength = len(self.compressedMetaData)
+ offset += self.metaLength
+ else:
+ self.metaOffset = self.metaLength = self.metaOrigLength = 0
+ self.compressedMetaData = b""
+ if data.privData:
+ # make sure private data is padded to 4-byte boundary
+ offset = (offset + 3) & ~3
+ self.privOffset = offset
+ self.privLength = len(data.privData)
+ offset += self.privLength
+ else:
+ self.privOffset = self.privLength = 0
+ return offset
+
+ def _getVersion(self):
+ """Return the WOFF2 font's (majorVersion, minorVersion) tuple."""
+ data = self.flavorData
+ if data.majorVersion is not None and data.minorVersion is not None:
+ return data.majorVersion, data.minorVersion
+ else:
+ # if None, return 'fontRevision' from 'head' table
+ if "head" in self.tables:
+ return struct.unpack(">HH", self.tables["head"].data[4:8])
+ else:
+ return 0, 0
+
+ def _packTableDirectory(self):
+ """Return WOFF2 table directory data."""
+ directory = sstruct.pack(self.directoryFormat, self)
+ for entry in self.tables.values():
+ directory = directory + entry.toString()
+ return directory
+
+ def _writeFlavorData(self):
+        """Write metadata and/or private data using appropriate padding."""
+ compressedMetaData = self.compressedMetaData
+ privData = self.flavorData.privData
+ if compressedMetaData and privData:
+ compressedMetaData = pad(compressedMetaData, size=4)
+ if compressedMetaData:
+ self.file.seek(self.metaOffset)
+ assert self.file.tell() == self.metaOffset
+ self.file.write(compressedMetaData)
+ if privData:
+ self.file.seek(self.privOffset)
+ assert self.file.tell() == self.privOffset
+ self.file.write(privData)
+
+ def reordersTables(self):
+ return True
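
Symmetrically, the writer is usually reached through TTFont.save() once the font's flavor is set; a minimal sketch under the same assumptions (placeholder file names, brotli installed):

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")
    font.flavor = "woff2"        # save() then routes table data through WOFF2Writer
    font.save("MyFont.woff2")
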
# -- woff2 directory helpers and cruft
@@ -492,13 +524,70 @@ woff2DirectoryFormat = """
woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat)
woff2KnownTags = (
- "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ",
- "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp",
- "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF",
- "GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL",
- "SVG ", "sbix", "acnt", "avar", "bdat", "bloc", "bsln", "cvar", "fdsc",
- "feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx",
- "opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill")
+ "cmap",
+ "head",
+ "hhea",
+ "hmtx",
+ "maxp",
+ "name",
+ "OS/2",
+ "post",
+ "cvt ",
+ "fpgm",
+ "glyf",
+ "loca",
+ "prep",
+ "CFF ",
+ "VORG",
+ "EBDT",
+ "EBLC",
+ "gasp",
+ "hdmx",
+ "kern",
+ "LTSH",
+ "PCLT",
+ "VDMX",
+ "vhea",
+ "vmtx",
+ "BASE",
+ "GDEF",
+ "GPOS",
+ "GSUB",
+ "EBSC",
+ "JSTF",
+ "MATH",
+ "CBDT",
+ "CBLC",
+ "COLR",
+ "CPAL",
+ "SVG ",
+ "sbix",
+ "acnt",
+ "avar",
+ "bdat",
+ "bloc",
+ "bsln",
+ "cvar",
+ "fdsc",
+ "feat",
+ "fmtx",
+ "fvar",
+ "gvar",
+ "hsty",
+ "just",
+ "lcar",
+ "mort",
+ "morx",
+ "opbd",
+ "prop",
+ "trak",
+ "Zapf",
+ "Silf",
+ "Glat",
+ "Gloc",
+ "Feat",
+ "Sill",
+)
woff2FlagsFormat = """
> # big endian
@@ -517,13 +606,16 @@ woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat)
woff2UnknownTagIndex = 0x3F
woff2Base128MaxSize = 5
-woff2DirectoryEntryMaxSize = woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize
+woff2DirectoryEntryMaxSize = (
+ woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize
+)
-woff2TransformedTableTags = ('glyf', 'loca')
+woff2TransformedTableTags = ("glyf", "loca")
woff2GlyfTableFormat = """
> # big endian
- version: L # = 0x00000000
+ version: H # = 0x0000
+ optionFlags: H # Bit 0: we have overlapSimpleBitmap[], Bits 1-15: reserved
numGlyphs: H # Number of glyphs
indexFormat: H # Offset format for loca table
nContourStreamSize: L # Size of nContour stream
@@ -545,988 +637,1049 @@ bboxFormat = """
yMax: h
"""
+woff2OverlapSimpleBitmapFlag = 0x0001
+
def getKnownTagIndex(tag):
- """Return index of 'tag' in woff2KnownTags list. Return 63 if not found."""
- for i in range(len(woff2KnownTags)):
- if tag == woff2KnownTags[i]:
- return i
- return woff2UnknownTagIndex
+ """Return index of 'tag' in woff2KnownTags list. Return 63 if not found."""
+ for i in range(len(woff2KnownTags)):
+ if tag == woff2KnownTags[i]:
+ return i
+ return woff2UnknownTagIndex
class WOFF2DirectoryEntry(DirectoryEntry):
-
- def fromFile(self, file):
- pos = file.tell()
- data = file.read(woff2DirectoryEntryMaxSize)
- left = self.fromString(data)
- consumed = len(data) - len(left)
- file.seek(pos + consumed)
-
- def fromString(self, data):
- if len(data) < 1:
- raise TTLibError("can't read table 'flags': not enough data")
- dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self)
- if self.flags & 0x3F == 0x3F:
- # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value
- if len(data) < woff2UnknownTagSize:
- raise TTLibError("can't read table 'tag': not enough data")
- dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self)
- else:
- # otherwise, tag is derived from a fixed 'Known Tags' table
- self.tag = woff2KnownTags[self.flags & 0x3F]
- self.tag = Tag(self.tag)
- self.origLength, data = unpackBase128(data)
- self.length = self.origLength
- if self.transformed:
- self.length, data = unpackBase128(data)
- if self.tag == 'loca' and self.length != 0:
- raise TTLibError(
- "the transformLength of the 'loca' table must be 0")
- # return left over data
- return data
-
- def toString(self):
- data = bytechr(self.flags)
- if (self.flags & 0x3F) == 0x3F:
- data += struct.pack('>4s', self.tag.tobytes())
- data += packBase128(self.origLength)
- if self.transformed:
- data += packBase128(self.length)
- return data
-
- @property
- def transformVersion(self):
- """Return bits 6-7 of table entry's flags, which indicate the preprocessing
- transformation version number (between 0 and 3).
- """
- return self.flags >> 6
-
- @transformVersion.setter
- def transformVersion(self, value):
- assert 0 <= value <= 3
- self.flags |= value << 6
-
- @property
- def transformed(self):
- """Return True if the table has any transformation, else return False."""
- # For all tables in a font, except for 'glyf' and 'loca', the transformation
- # version 0 indicates the null transform (where the original table data is
- # passed directly to the Brotli compressor). For 'glyf' and 'loca' tables,
- # transformation version 3 indicates the null transform
- if self.tag in {"glyf", "loca"}:
- return self.transformVersion != 3
- else:
- return self.transformVersion != 0
-
- @transformed.setter
- def transformed(self, booleanValue):
- # here we assume that a non-null transform means version 0 for 'glyf' and
- # 'loca' and 1 for every other table (e.g. hmtx); but that may change as
- # new transformation formats are introduced in the future (if ever).
- if self.tag in {"glyf", "loca"}:
- self.transformVersion = 3 if not booleanValue else 0
- else:
- self.transformVersion = int(booleanValue)
-
-
-class WOFF2LocaTable(getTableClass('loca')):
- """Same as parent class. The only difference is that it attempts to preserve
- the 'indexFormat' as encoded in the WOFF2 glyf table.
- """
-
- def __init__(self, tag=None):
- self.tableTag = Tag(tag or 'loca')
-
- def compile(self, ttFont):
- try:
- max_location = max(self.locations)
- except AttributeError:
- self.set([])
- max_location = 0
- if 'glyf' in ttFont and hasattr(ttFont['glyf'], 'indexFormat'):
- # copile loca using the indexFormat specified in the WOFF2 glyf table
- indexFormat = ttFont['glyf'].indexFormat
- if indexFormat == 0:
- if max_location >= 0x20000:
- raise TTLibError("indexFormat is 0 but local offsets > 0x20000")
- if not all(l % 2 == 0 for l in self.locations):
- raise TTLibError("indexFormat is 0 but local offsets not multiples of 2")
- locations = array.array("H")
- for i in range(len(self.locations)):
- locations.append(self.locations[i] // 2)
- else:
- locations = array.array("I", self.locations)
- if sys.byteorder != "big": locations.byteswap()
- data = locations.tobytes()
- else:
- # use the most compact indexFormat given the current glyph offsets
- data = super(WOFF2LocaTable, self).compile(ttFont)
- return data
-
-
-class WOFF2GlyfTable(getTableClass('glyf')):
- """Decoder/Encoder for WOFF2 'glyf' table transform."""
-
- subStreams = (
- 'nContourStream', 'nPointsStream', 'flagStream', 'glyphStream',
- 'compositeStream', 'bboxStream', 'instructionStream')
-
- def __init__(self, tag=None):
- self.tableTag = Tag(tag or 'glyf')
-
- def reconstruct(self, data, ttFont):
- """ Decompile transformed 'glyf' data. """
- inputDataSize = len(data)
-
- if inputDataSize < woff2GlyfTableFormatSize:
- raise TTLibError("not enough 'glyf' data")
- dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self)
- offset = woff2GlyfTableFormatSize
-
- for stream in self.subStreams:
- size = getattr(self, stream + 'Size')
- setattr(self, stream, data[:size])
- data = data[size:]
- offset += size
-
- if offset != inputDataSize:
- raise TTLibError(
- "incorrect size of transformed 'glyf' table: expected %d, received %d bytes"
- % (offset, inputDataSize))
-
- bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
- bboxBitmap = self.bboxStream[:bboxBitmapSize]
- self.bboxBitmap = array.array('B', bboxBitmap)
- self.bboxStream = self.bboxStream[bboxBitmapSize:]
-
- self.nContourStream = array.array("h", self.nContourStream)
- if sys.byteorder != "big": self.nContourStream.byteswap()
- assert len(self.nContourStream) == self.numGlyphs
-
- if 'head' in ttFont:
- ttFont['head'].indexToLocFormat = self.indexFormat
- try:
- self.glyphOrder = ttFont.getGlyphOrder()
- except:
- self.glyphOrder = None
- if self.glyphOrder is None:
- self.glyphOrder = [".notdef"]
- self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)])
- else:
- if len(self.glyphOrder) != self.numGlyphs:
- raise TTLibError(
- "incorrect glyphOrder: expected %d glyphs, found %d" %
- (len(self.glyphOrder), self.numGlyphs))
-
- glyphs = self.glyphs = {}
- for glyphID, glyphName in enumerate(self.glyphOrder):
- glyph = self._decodeGlyph(glyphID)
- glyphs[glyphName] = glyph
-
- def transform(self, ttFont):
- """ Return transformed 'glyf' data """
- self.numGlyphs = len(self.glyphs)
- assert len(self.glyphOrder) == self.numGlyphs
- if 'maxp' in ttFont:
- ttFont['maxp'].numGlyphs = self.numGlyphs
- self.indexFormat = ttFont['head'].indexToLocFormat
-
- for stream in self.subStreams:
- setattr(self, stream, b"")
- bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
- self.bboxBitmap = array.array('B', [0]*bboxBitmapSize)
-
- for glyphID in range(self.numGlyphs):
- self._encodeGlyph(glyphID)
-
- self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream
- for stream in self.subStreams:
- setattr(self, stream + 'Size', len(getattr(self, stream)))
- self.version = 0
- data = sstruct.pack(woff2GlyfTableFormat, self)
- data += bytesjoin([getattr(self, s) for s in self.subStreams])
- return data
-
- def _decodeGlyph(self, glyphID):
- glyph = getTableModule('glyf').Glyph()
- glyph.numberOfContours = self.nContourStream[glyphID]
- if glyph.numberOfContours == 0:
- return glyph
- elif glyph.isComposite():
- self._decodeComponents(glyph)
- else:
- self._decodeCoordinates(glyph)
- self._decodeBBox(glyphID, glyph)
- return glyph
-
- def _decodeComponents(self, glyph):
- data = self.compositeStream
- glyph.components = []
- more = 1
- haveInstructions = 0
- while more:
- component = getTableModule('glyf').GlyphComponent()
- more, haveInstr, data = component.decompile(data, self)
- haveInstructions = haveInstructions | haveInstr
- glyph.components.append(component)
- self.compositeStream = data
- if haveInstructions:
- self._decodeInstructions(glyph)
-
- def _decodeCoordinates(self, glyph):
- data = self.nPointsStream
- endPtsOfContours = []
- endPoint = -1
- for i in range(glyph.numberOfContours):
- ptsOfContour, data = unpack255UShort(data)
- endPoint += ptsOfContour
- endPtsOfContours.append(endPoint)
- glyph.endPtsOfContours = endPtsOfContours
- self.nPointsStream = data
- self._decodeTriplets(glyph)
- self._decodeInstructions(glyph)
-
- def _decodeInstructions(self, glyph):
- glyphStream = self.glyphStream
- instructionStream = self.instructionStream
- instructionLength, glyphStream = unpack255UShort(glyphStream)
- glyph.program = ttProgram.Program()
- glyph.program.fromBytecode(instructionStream[:instructionLength])
- self.glyphStream = glyphStream
- self.instructionStream = instructionStream[instructionLength:]
-
- def _decodeBBox(self, glyphID, glyph):
- haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7)))
- if glyph.isComposite() and not haveBBox:
- raise TTLibError('no bbox values for composite glyph %d' % glyphID)
- if haveBBox:
- dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph)
- else:
- glyph.recalcBounds(self)
-
- def _decodeTriplets(self, glyph):
-
- def withSign(flag, baseval):
- assert 0 <= baseval and baseval < 65536, 'integer overflow'
- return baseval if flag & 1 else -baseval
-
- nPoints = glyph.endPtsOfContours[-1] + 1
- flagSize = nPoints
- if flagSize > len(self.flagStream):
- raise TTLibError("not enough 'flagStream' data")
- flagsData = self.flagStream[:flagSize]
- self.flagStream = self.flagStream[flagSize:]
- flags = array.array('B', flagsData)
-
- triplets = array.array('B', self.glyphStream)
- nTriplets = len(triplets)
- assert nPoints <= nTriplets
-
- x = 0
- y = 0
- glyph.coordinates = getTableModule('glyf').GlyphCoordinates.zeros(nPoints)
- glyph.flags = array.array("B")
- tripletIndex = 0
- for i in range(nPoints):
- flag = flags[i]
- onCurve = not bool(flag >> 7)
- flag &= 0x7f
- if flag < 84:
- nBytes = 1
- elif flag < 120:
- nBytes = 2
- elif flag < 124:
- nBytes = 3
- else:
- nBytes = 4
- assert ((tripletIndex + nBytes) <= nTriplets)
- if flag < 10:
- dx = 0
- dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex])
- elif flag < 20:
- dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex])
- dy = 0
- elif flag < 84:
- b0 = flag - 20
- b1 = triplets[tripletIndex]
- dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4))
- dy = withSign(flag >> 1, 1 + ((b0 & 0x0c) << 2) + (b1 & 0x0f))
- elif flag < 120:
- b0 = flag - 84
- dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex])
- dy = withSign(flag >> 1,
- 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1])
- elif flag < 124:
- b2 = triplets[tripletIndex + 1]
- dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4))
- dy = withSign(flag >> 1,
- ((b2 & 0x0f) << 8) + triplets[tripletIndex + 2])
- else:
- dx = withSign(flag,
- (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1])
- dy = withSign(flag >> 1,
- (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3])
- tripletIndex += nBytes
- x += dx
- y += dy
- glyph.coordinates[i] = (x, y)
- glyph.flags.append(int(onCurve))
- bytesConsumed = tripletIndex
- self.glyphStream = self.glyphStream[bytesConsumed:]
-
- def _encodeGlyph(self, glyphID):
- glyphName = self.getGlyphName(glyphID)
- glyph = self[glyphName]
- self.nContourStream += struct.pack(">h", glyph.numberOfContours)
- if glyph.numberOfContours == 0:
- return
- elif glyph.isComposite():
- self._encodeComponents(glyph)
- else:
- self._encodeCoordinates(glyph)
- self._encodeBBox(glyphID, glyph)
-
- def _encodeComponents(self, glyph):
- lastcomponent = len(glyph.components) - 1
- more = 1
- haveInstructions = 0
- for i in range(len(glyph.components)):
- if i == lastcomponent:
- haveInstructions = hasattr(glyph, "program")
- more = 0
- component = glyph.components[i]
- self.compositeStream += component.compile(more, haveInstructions, self)
- if haveInstructions:
- self._encodeInstructions(glyph)
-
- def _encodeCoordinates(self, glyph):
- lastEndPoint = -1
- for endPoint in glyph.endPtsOfContours:
- ptsOfContour = endPoint - lastEndPoint
- self.nPointsStream += pack255UShort(ptsOfContour)
- lastEndPoint = endPoint
- self._encodeTriplets(glyph)
- self._encodeInstructions(glyph)
-
- def _encodeInstructions(self, glyph):
- instructions = glyph.program.getBytecode()
- self.glyphStream += pack255UShort(len(instructions))
- self.instructionStream += instructions
-
- def _encodeBBox(self, glyphID, glyph):
- assert glyph.numberOfContours != 0, "empty glyph has no bbox"
- if not glyph.isComposite():
- # for simple glyphs, compare the encoded bounding box info with the calculated
- # values, and if they match omit the bounding box info
- currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax
- calculatedBBox = calcIntBounds(glyph.coordinates)
- if currentBBox == calculatedBBox:
- return
- self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7)
- self.bboxStream += sstruct.pack(bboxFormat, glyph)
-
- def _encodeTriplets(self, glyph):
- assert len(glyph.coordinates) == len(glyph.flags)
- coordinates = glyph.coordinates.copy()
- coordinates.absoluteToRelative()
-
- flags = array.array('B')
- triplets = array.array('B')
- for i in range(len(coordinates)):
- onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve
- x, y = coordinates[i]
- absX = abs(x)
- absY = abs(y)
- onCurveBit = 0 if onCurve else 128
- xSignBit = 0 if (x < 0) else 1
- ySignBit = 0 if (y < 0) else 1
- xySignBits = xSignBit + 2 * ySignBit
-
- if x == 0 and absY < 1280:
- flags.append(onCurveBit + ((absY & 0xf00) >> 7) + ySignBit)
- triplets.append(absY & 0xff)
- elif y == 0 and absX < 1280:
- flags.append(onCurveBit + 10 + ((absX & 0xf00) >> 7) + xSignBit)
- triplets.append(absX & 0xff)
- elif absX < 65 and absY < 65:
- flags.append(onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits)
- triplets.append((((absX - 1) & 0xf) << 4) | ((absY - 1) & 0xf))
- elif absX < 769 and absY < 769:
- flags.append(onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits)
- triplets.append((absX - 1) & 0xff)
- triplets.append((absY - 1) & 0xff)
- elif absX < 4096 and absY < 4096:
- flags.append(onCurveBit + 120 + xySignBits)
- triplets.append(absX >> 4)
- triplets.append(((absX & 0xf) << 4) | (absY >> 8))
- triplets.append(absY & 0xff)
- else:
- flags.append(onCurveBit + 124 + xySignBits)
- triplets.append(absX >> 8)
- triplets.append(absX & 0xff)
- triplets.append(absY >> 8)
- triplets.append(absY & 0xff)
-
- self.flagStream += flags.tobytes()
- self.glyphStream += triplets.tobytes()
+ def fromFile(self, file):
+ pos = file.tell()
+ data = file.read(woff2DirectoryEntryMaxSize)
+ left = self.fromString(data)
+ consumed = len(data) - len(left)
+ file.seek(pos + consumed)
+
+ def fromString(self, data):
+ if len(data) < 1:
+ raise TTLibError("can't read table 'flags': not enough data")
+ dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self)
+ if self.flags & 0x3F == 0x3F:
+ # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value
+ if len(data) < woff2UnknownTagSize:
+ raise TTLibError("can't read table 'tag': not enough data")
+ dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self)
+ else:
+ # otherwise, tag is derived from a fixed 'Known Tags' table
+ self.tag = woff2KnownTags[self.flags & 0x3F]
+ self.tag = Tag(self.tag)
+ self.origLength, data = unpackBase128(data)
+ self.length = self.origLength
+ if self.transformed:
+ self.length, data = unpackBase128(data)
+ if self.tag == "loca" and self.length != 0:
+ raise TTLibError("the transformLength of the 'loca' table must be 0")
+ # return left over data
+ return data
+
+ def toString(self):
+ data = bytechr(self.flags)
+ if (self.flags & 0x3F) == 0x3F:
+ data += struct.pack(">4s", self.tag.tobytes())
+ data += packBase128(self.origLength)
+ if self.transformed:
+ data += packBase128(self.length)
+ return data
+
+ @property
+ def transformVersion(self):
+ """Return bits 6-7 of table entry's flags, which indicate the preprocessing
+ transformation version number (between 0 and 3).
+ """
+ return self.flags >> 6
+
+ @transformVersion.setter
+ def transformVersion(self, value):
+ assert 0 <= value <= 3
+ self.flags |= value << 6
+
+ @property
+ def transformed(self):
+ """Return True if the table has any transformation, else return False."""
+ # For all tables in a font, except for 'glyf' and 'loca', the transformation
+ # version 0 indicates the null transform (where the original table data is
+ # passed directly to the Brotli compressor). For 'glyf' and 'loca' tables,
+ # transformation version 3 indicates the null transform
+ if self.tag in {"glyf", "loca"}:
+ return self.transformVersion != 3
+ else:
+ return self.transformVersion != 0
+
+ @transformed.setter
+ def transformed(self, booleanValue):
+ # here we assume that a non-null transform means version 0 for 'glyf' and
+ # 'loca' and 1 for every other table (e.g. hmtx); but that may change as
+ # new transformation formats are introduced in the future (if ever).
+ if self.tag in {"glyf", "loca"}:
+ self.transformVersion = 3 if not booleanValue else 0
+ else:
+ self.transformVersion = int(booleanValue)
+
+
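
For reference, a self-contained sketch of the directory-entry encoding handled by fromString()/toString() above: bits 0-5 of the flags byte index into woff2KnownTags (63 meaning an explicit 4-byte tag follows), bits 6-7 carry the transform version, and lengths are stored as UIntBase128 variable-length integers. The decoder below follows the WOFF2 specification's description and is illustrative only; it is not the module's own packBase128/unpackBase128 helpers:

    def unpackUIntBase128(data):
        # Big-endian base 128: 7 value bits per byte, high bit set on every
        # byte except the last one.
        value = 0
        for i in range(len(data)):
            byte = data[i]
            value = (value << 7) | (byte & 0x7F)
            if not (byte & 0x80):
                return value, data[i + 1:]
        raise ValueError("unterminated UIntBase128")

    flags = (1 << 6) | 3                  # transform version 1, known-tag index 3
    assert flags & 0x3F == 3              # woff2KnownTags[3] is "hmtx"
    assert flags >> 6 == 1                # a non-zero version means 'hmtx' is transformed
    assert unpackUIntBase128(b"\x81\x00rest") == (128, b"rest")
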
+class WOFF2LocaTable(getTableClass("loca")):
+ """Same as parent class. The only difference is that it attempts to preserve
+ the 'indexFormat' as encoded in the WOFF2 glyf table.
+ """
+
+ def __init__(self, tag=None):
+ self.tableTag = Tag(tag or "loca")
+
+ def compile(self, ttFont):
+ try:
+ max_location = max(self.locations)
+ except AttributeError:
+ self.set([])
+ max_location = 0
+ if "glyf" in ttFont and hasattr(ttFont["glyf"], "indexFormat"):
+            # compile loca using the indexFormat specified in the WOFF2 glyf table
+ indexFormat = ttFont["glyf"].indexFormat
+ if indexFormat == 0:
+ if max_location >= 0x20000:
+ raise TTLibError("indexFormat is 0 but local offsets > 0x20000")
+ if not all(l % 2 == 0 for l in self.locations):
+ raise TTLibError(
+ "indexFormat is 0 but local offsets not multiples of 2"
+ )
+ locations = array.array("H")
+ for i in range(len(self.locations)):
+ locations.append(self.locations[i] // 2)
+ else:
+ locations = array.array("I", self.locations)
+ if sys.byteorder != "big":
+ locations.byteswap()
+ data = locations.tobytes()
+ else:
+ # use the most compact indexFormat given the current glyph offsets
+ data = super(WOFF2LocaTable, self).compile(ttFont)
+ return data
+
+
+class WOFF2GlyfTable(getTableClass("glyf")):
+ """Decoder/Encoder for WOFF2 'glyf' table transform."""
+
+ subStreams = (
+ "nContourStream",
+ "nPointsStream",
+ "flagStream",
+ "glyphStream",
+ "compositeStream",
+ "bboxStream",
+ "instructionStream",
+ )
+
+ def __init__(self, tag=None):
+ self.tableTag = Tag(tag or "glyf")
+
+ def reconstruct(self, data, ttFont):
+ """Decompile transformed 'glyf' data."""
+ inputDataSize = len(data)
+
+ if inputDataSize < woff2GlyfTableFormatSize:
+ raise TTLibError("not enough 'glyf' data")
+ dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self)
+ offset = woff2GlyfTableFormatSize
+
+ for stream in self.subStreams:
+ size = getattr(self, stream + "Size")
+ setattr(self, stream, data[:size])
+ data = data[size:]
+ offset += size
+
+ hasOverlapSimpleBitmap = self.optionFlags & woff2OverlapSimpleBitmapFlag
+ self.overlapSimpleBitmap = None
+ if hasOverlapSimpleBitmap:
+ overlapSimpleBitmapSize = (self.numGlyphs + 7) >> 3
+ self.overlapSimpleBitmap = array.array("B", data[:overlapSimpleBitmapSize])
+ offset += overlapSimpleBitmapSize
+
+ if offset != inputDataSize:
+ raise TTLibError(
+ "incorrect size of transformed 'glyf' table: expected %d, received %d bytes"
+ % (offset, inputDataSize)
+ )
+
+ bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
+ bboxBitmap = self.bboxStream[:bboxBitmapSize]
+ self.bboxBitmap = array.array("B", bboxBitmap)
+ self.bboxStream = self.bboxStream[bboxBitmapSize:]
+
+ self.nContourStream = array.array("h", self.nContourStream)
+ if sys.byteorder != "big":
+ self.nContourStream.byteswap()
+ assert len(self.nContourStream) == self.numGlyphs
+
+ if "head" in ttFont:
+ ttFont["head"].indexToLocFormat = self.indexFormat
+ try:
+ self.glyphOrder = ttFont.getGlyphOrder()
+ except:
+ self.glyphOrder = None
+ if self.glyphOrder is None:
+ self.glyphOrder = [".notdef"]
+ self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)])
+ else:
+ if len(self.glyphOrder) != self.numGlyphs:
+ raise TTLibError(
+ "incorrect glyphOrder: expected %d glyphs, found %d"
+ % (len(self.glyphOrder), self.numGlyphs)
+ )
+
+ glyphs = self.glyphs = {}
+ for glyphID, glyphName in enumerate(self.glyphOrder):
+ glyph = self._decodeGlyph(glyphID)
+ glyphs[glyphName] = glyph
+
+ def transform(self, ttFont):
+        """Return transformed 'glyf' data."""
+ self.numGlyphs = len(self.glyphs)
+ assert len(self.glyphOrder) == self.numGlyphs
+ if "maxp" in ttFont:
+ ttFont["maxp"].numGlyphs = self.numGlyphs
+ self.indexFormat = ttFont["head"].indexToLocFormat
+
+ for stream in self.subStreams:
+ setattr(self, stream, b"")
+ bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
+ self.bboxBitmap = array.array("B", [0] * bboxBitmapSize)
+
+ self.overlapSimpleBitmap = array.array("B", [0] * ((self.numGlyphs + 7) >> 3))
+ for glyphID in range(self.numGlyphs):
+ try:
+ self._encodeGlyph(glyphID)
+ except NotImplementedError:
+ return None
+ hasOverlapSimpleBitmap = any(self.overlapSimpleBitmap)
+
+ self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream
+ for stream in self.subStreams:
+ setattr(self, stream + "Size", len(getattr(self, stream)))
+ self.version = 0
+ self.optionFlags = 0
+ if hasOverlapSimpleBitmap:
+ self.optionFlags |= woff2OverlapSimpleBitmapFlag
+ data = sstruct.pack(woff2GlyfTableFormat, self)
+ data += bytesjoin([getattr(self, s) for s in self.subStreams])
+ if hasOverlapSimpleBitmap:
+ data += self.overlapSimpleBitmap.tobytes()
+ return data
+
+ def _decodeGlyph(self, glyphID):
+ glyph = getTableModule("glyf").Glyph()
+ glyph.numberOfContours = self.nContourStream[glyphID]
+ if glyph.numberOfContours == 0:
+ return glyph
+ elif glyph.isComposite():
+ self._decodeComponents(glyph)
+ else:
+ self._decodeCoordinates(glyph)
+ self._decodeOverlapSimpleFlag(glyph, glyphID)
+ self._decodeBBox(glyphID, glyph)
+ return glyph
+
+ def _decodeComponents(self, glyph):
+ data = self.compositeStream
+ glyph.components = []
+ more = 1
+ haveInstructions = 0
+ while more:
+ component = getTableModule("glyf").GlyphComponent()
+ more, haveInstr, data = component.decompile(data, self)
+ haveInstructions = haveInstructions | haveInstr
+ glyph.components.append(component)
+ self.compositeStream = data
+ if haveInstructions:
+ self._decodeInstructions(glyph)
+
+ def _decodeCoordinates(self, glyph):
+ data = self.nPointsStream
+ endPtsOfContours = []
+ endPoint = -1
+ for i in range(glyph.numberOfContours):
+ ptsOfContour, data = unpack255UShort(data)
+ endPoint += ptsOfContour
+ endPtsOfContours.append(endPoint)
+ glyph.endPtsOfContours = endPtsOfContours
+ self.nPointsStream = data
+ self._decodeTriplets(glyph)
+ self._decodeInstructions(glyph)
+
+ def _decodeOverlapSimpleFlag(self, glyph, glyphID):
+ if self.overlapSimpleBitmap is None or glyph.numberOfContours <= 0:
+ return
+ byte = glyphID >> 3
+ bit = glyphID & 7
+ if self.overlapSimpleBitmap[byte] & (0x80 >> bit):
+ glyph.flags[0] |= _g_l_y_f.flagOverlapSimple
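
Both bboxBitmap and overlapSimpleBitmap above pack one bit per glyph, most significant bit first within each byte, so a glyph ID maps to a byte index and a bit mask like this (illustrative only):

    def bitmapHasGlyph(bitmap, glyphID):
        return bool(bitmap[glyphID >> 3] & (0x80 >> (glyphID & 7)))

    bitmap = bytearray(2)        # enough for 16 glyphs
    bitmap[1] |= 0x80 >> 2       # set the bit for glyph ID 10
    assert bitmapHasGlyph(bitmap, 10)
    assert not bitmapHasGlyph(bitmap, 9)
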
+
+ def _decodeInstructions(self, glyph):
+ glyphStream = self.glyphStream
+ instructionStream = self.instructionStream
+ instructionLength, glyphStream = unpack255UShort(glyphStream)
+ glyph.program = ttProgram.Program()
+ glyph.program.fromBytecode(instructionStream[:instructionLength])
+ self.glyphStream = glyphStream
+ self.instructionStream = instructionStream[instructionLength:]
+
+ def _decodeBBox(self, glyphID, glyph):
+ haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7)))
+ if glyph.isComposite() and not haveBBox:
+ raise TTLibError("no bbox values for composite glyph %d" % glyphID)
+ if haveBBox:
+ dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph)
+ else:
+ glyph.recalcBounds(self)
+
+ def _decodeTriplets(self, glyph):
+ def withSign(flag, baseval):
+ assert 0 <= baseval and baseval < 65536, "integer overflow"
+ return baseval if flag & 1 else -baseval
+
+ nPoints = glyph.endPtsOfContours[-1] + 1
+ flagSize = nPoints
+ if flagSize > len(self.flagStream):
+ raise TTLibError("not enough 'flagStream' data")
+ flagsData = self.flagStream[:flagSize]
+ self.flagStream = self.flagStream[flagSize:]
+ flags = array.array("B", flagsData)
+
+ triplets = array.array("B", self.glyphStream)
+ nTriplets = len(triplets)
+ assert nPoints <= nTriplets
+
+ x = 0
+ y = 0
+ glyph.coordinates = getTableModule("glyf").GlyphCoordinates.zeros(nPoints)
+ glyph.flags = array.array("B")
+ tripletIndex = 0
+ for i in range(nPoints):
+ flag = flags[i]
+ onCurve = not bool(flag >> 7)
+ flag &= 0x7F
+ if flag < 84:
+ nBytes = 1
+ elif flag < 120:
+ nBytes = 2
+ elif flag < 124:
+ nBytes = 3
+ else:
+ nBytes = 4
+ assert (tripletIndex + nBytes) <= nTriplets
+ if flag < 10:
+ dx = 0
+ dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex])
+ elif flag < 20:
+ dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex])
+ dy = 0
+ elif flag < 84:
+ b0 = flag - 20
+ b1 = triplets[tripletIndex]
+ dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4))
+ dy = withSign(flag >> 1, 1 + ((b0 & 0x0C) << 2) + (b1 & 0x0F))
+ elif flag < 120:
+ b0 = flag - 84
+ dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex])
+ dy = withSign(
+ flag >> 1, 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1]
+ )
+ elif flag < 124:
+ b2 = triplets[tripletIndex + 1]
+ dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4))
+ dy = withSign(
+ flag >> 1, ((b2 & 0x0F) << 8) + triplets[tripletIndex + 2]
+ )
+ else:
+ dx = withSign(
+ flag, (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1]
+ )
+ dy = withSign(
+ flag >> 1,
+ (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3],
+ )
+ tripletIndex += nBytes
+ x += dx
+ y += dy
+ glyph.coordinates[i] = (x, y)
+ glyph.flags.append(int(onCurve))
+ bytesConsumed = tripletIndex
+ self.glyphStream = self.glyphStream[bytesConsumed:]
+
+ def _encodeGlyph(self, glyphID):
+ glyphName = self.getGlyphName(glyphID)
+ glyph = self[glyphName]
+ self.nContourStream += struct.pack(">h", glyph.numberOfContours)
+ if glyph.numberOfContours == 0:
+ return
+ elif glyph.isComposite():
+ self._encodeComponents(glyph)
+ elif glyph.isVarComposite():
+ raise NotImplementedError
+ else:
+ self._encodeCoordinates(glyph)
+ self._encodeOverlapSimpleFlag(glyph, glyphID)
+ self._encodeBBox(glyphID, glyph)
+
+ def _encodeComponents(self, glyph):
+ lastcomponent = len(glyph.components) - 1
+ more = 1
+ haveInstructions = 0
+ for i in range(len(glyph.components)):
+ if i == lastcomponent:
+ haveInstructions = hasattr(glyph, "program")
+ more = 0
+ component = glyph.components[i]
+ self.compositeStream += component.compile(more, haveInstructions, self)
+ if haveInstructions:
+ self._encodeInstructions(glyph)
+
+ def _encodeCoordinates(self, glyph):
+ lastEndPoint = -1
+ if _g_l_y_f.flagCubic in glyph.flags:
+ raise NotImplementedError
+ for endPoint in glyph.endPtsOfContours:
+ ptsOfContour = endPoint - lastEndPoint
+ self.nPointsStream += pack255UShort(ptsOfContour)
+ lastEndPoint = endPoint
+ self._encodeTriplets(glyph)
+ self._encodeInstructions(glyph)
+
+ def _encodeOverlapSimpleFlag(self, glyph, glyphID):
+ if glyph.numberOfContours <= 0:
+ return
+ if glyph.flags[0] & _g_l_y_f.flagOverlapSimple:
+ byte = glyphID >> 3
+ bit = glyphID & 7
+ self.overlapSimpleBitmap[byte] |= 0x80 >> bit
+
+ def _encodeInstructions(self, glyph):
+ instructions = glyph.program.getBytecode()
+ self.glyphStream += pack255UShort(len(instructions))
+ self.instructionStream += instructions
+
+ def _encodeBBox(self, glyphID, glyph):
+ assert glyph.numberOfContours != 0, "empty glyph has no bbox"
+ if not glyph.isComposite():
+ # for simple glyphs, compare the encoded bounding box info with the calculated
+ # values, and if they match omit the bounding box info
+ currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax
+ calculatedBBox = calcIntBounds(glyph.coordinates)
+ if currentBBox == calculatedBBox:
+ return
+ self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7)
+ self.bboxStream += sstruct.pack(bboxFormat, glyph)
+
+ def _encodeTriplets(self, glyph):
+ assert len(glyph.coordinates) == len(glyph.flags)
+ coordinates = glyph.coordinates.copy()
+ coordinates.absoluteToRelative()
+
+ flags = array.array("B")
+ triplets = array.array("B")
+ for i in range(len(coordinates)):
+ onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve
+ x, y = coordinates[i]
+ absX = abs(x)
+ absY = abs(y)
+ onCurveBit = 0 if onCurve else 128
+ xSignBit = 0 if (x < 0) else 1
+ ySignBit = 0 if (y < 0) else 1
+ xySignBits = xSignBit + 2 * ySignBit
+
+ if x == 0 and absY < 1280:
+ flags.append(onCurveBit + ((absY & 0xF00) >> 7) + ySignBit)
+ triplets.append(absY & 0xFF)
+ elif y == 0 and absX < 1280:
+ flags.append(onCurveBit + 10 + ((absX & 0xF00) >> 7) + xSignBit)
+ triplets.append(absX & 0xFF)
+ elif absX < 65 and absY < 65:
+ flags.append(
+ onCurveBit
+ + 20
+ + ((absX - 1) & 0x30)
+ + (((absY - 1) & 0x30) >> 2)
+ + xySignBits
+ )
+ triplets.append((((absX - 1) & 0xF) << 4) | ((absY - 1) & 0xF))
+ elif absX < 769 and absY < 769:
+ flags.append(
+ onCurveBit
+ + 84
+ + 12 * (((absX - 1) & 0x300) >> 8)
+ + (((absY - 1) & 0x300) >> 6)
+ + xySignBits
+ )
+ triplets.append((absX - 1) & 0xFF)
+ triplets.append((absY - 1) & 0xFF)
+ elif absX < 4096 and absY < 4096:
+ flags.append(onCurveBit + 120 + xySignBits)
+ triplets.append(absX >> 4)
+ triplets.append(((absX & 0xF) << 4) | (absY >> 8))
+ triplets.append(absY & 0xFF)
+ else:
+ flags.append(onCurveBit + 124 + xySignBits)
+ triplets.append(absX >> 8)
+ triplets.append(absX & 0xFF)
+ triplets.append(absY >> 8)
+ triplets.append(absY & 0xFF)
+
+ self.flagStream += flags.tobytes()
+ self.glyphStream += triplets.tobytes()
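
The triplet coding is easiest to follow one branch at a time. Below is a minimal, illustrative round trip for the first encoder case (dx == 0 and |dy| < 1280), mirroring the flag < 10 decoder branch above; the function names are made up and only this one case is covered:

    def encodeVerticalDelta(dy, onCurve=True):
        # Mirrors the "x == 0 and absY < 1280" branch of _encodeTriplets.
        absY = abs(dy)
        assert absY < 1280
        onCurveBit = 0 if onCurve else 128
        ySignBit = 0 if dy < 0 else 1
        flag = onCurveBit + ((absY & 0xF00) >> 7) + ySignBit
        return flag, bytes([absY & 0xFF])

    def decodeVerticalDelta(flag, triplet):
        # Mirrors the "flag < 10" branch of _decodeTriplets.
        onCurve = not bool(flag >> 7)
        flag &= 0x7F
        dy = ((flag & 14) << 7) + triplet[0]
        return (dy if flag & 1 else -dy), onCurve

    assert encodeVerticalDelta(300) == (3, b"\x2c")
    assert decodeVerticalDelta(3, b"\x2c") == (300, True)
    assert decodeVerticalDelta(*encodeVerticalDelta(-300)) == (-300, True)
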
class WOFF2HmtxTable(getTableClass("hmtx")):
-
- def __init__(self, tag=None):
- self.tableTag = Tag(tag or 'hmtx')
-
- def reconstruct(self, data, ttFont):
- flags, = struct.unpack(">B", data[:1])
- data = data[1:]
- if flags & 0b11111100 != 0:
- raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag)
-
- # When bit 0 is _not_ set, the lsb[] array is present
- hasLsbArray = flags & 1 == 0
- # When bit 1 is _not_ set, the leftSideBearing[] array is present
- hasLeftSideBearingArray = flags & 2 == 0
- if hasLsbArray and hasLeftSideBearingArray:
- raise TTLibError(
- "either bits 0 or 1 (or both) must set in transformed '%s' flags"
- % self.tableTag
- )
-
- glyfTable = ttFont["glyf"]
- headerTable = ttFont["hhea"]
- glyphOrder = glyfTable.glyphOrder
- numGlyphs = len(glyphOrder)
- numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs)
-
- assert len(data) >= 2 * numberOfHMetrics
- advanceWidthArray = array.array("H", data[:2 * numberOfHMetrics])
- if sys.byteorder != "big":
- advanceWidthArray.byteswap()
- data = data[2 * numberOfHMetrics:]
-
- if hasLsbArray:
- assert len(data) >= 2 * numberOfHMetrics
- lsbArray = array.array("h", data[:2 * numberOfHMetrics])
- if sys.byteorder != "big":
- lsbArray.byteswap()
- data = data[2 * numberOfHMetrics:]
- else:
- # compute (proportional) glyphs' lsb from their xMin
- lsbArray = array.array("h")
- for i, glyphName in enumerate(glyphOrder):
- if i >= numberOfHMetrics:
- break
- glyph = glyfTable[glyphName]
- xMin = getattr(glyph, "xMin", 0)
- lsbArray.append(xMin)
-
- numberOfSideBearings = numGlyphs - numberOfHMetrics
- if hasLeftSideBearingArray:
- assert len(data) >= 2 * numberOfSideBearings
- leftSideBearingArray = array.array("h", data[:2 * numberOfSideBearings])
- if sys.byteorder != "big":
- leftSideBearingArray.byteswap()
- data = data[2 * numberOfSideBearings:]
- else:
- # compute (monospaced) glyphs' leftSideBearing from their xMin
- leftSideBearingArray = array.array("h")
- for i, glyphName in enumerate(glyphOrder):
- if i < numberOfHMetrics:
- continue
- glyph = glyfTable[glyphName]
- xMin = getattr(glyph, "xMin", 0)
- leftSideBearingArray.append(xMin)
-
- if data:
- raise TTLibError("too much '%s' table data" % self.tableTag)
-
- self.metrics = {}
- for i in range(numberOfHMetrics):
- glyphName = glyphOrder[i]
- advanceWidth, lsb = advanceWidthArray[i], lsbArray[i]
- self.metrics[glyphName] = (advanceWidth, lsb)
- lastAdvance = advanceWidthArray[-1]
- for i in range(numberOfSideBearings):
- glyphName = glyphOrder[i + numberOfHMetrics]
- self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i])
-
- def transform(self, ttFont):
- glyphOrder = ttFont.getGlyphOrder()
- glyf = ttFont["glyf"]
- hhea = ttFont["hhea"]
- numberOfHMetrics = hhea.numberOfHMetrics
-
- # check if any of the proportional glyphs has left sidebearings that
- # differ from their xMin bounding box values.
- hasLsbArray = False
- for i in range(numberOfHMetrics):
- glyphName = glyphOrder[i]
- lsb = self.metrics[glyphName][1]
- if lsb != getattr(glyf[glyphName], "xMin", 0):
- hasLsbArray = True
- break
-
- # do the same for the monospaced glyphs (if any) at the end of hmtx table
- hasLeftSideBearingArray = False
- for i in range(numberOfHMetrics, len(glyphOrder)):
- glyphName = glyphOrder[i]
- lsb = self.metrics[glyphName][1]
- if lsb != getattr(glyf[glyphName], "xMin", 0):
- hasLeftSideBearingArray = True
- break
-
- # if we need to encode both sidebearings arrays, then no transformation is
- # applicable, and we must use the untransformed hmtx data
- if hasLsbArray and hasLeftSideBearingArray:
- return
-
- # set bit 0 and 1 when the respective arrays are _not_ present
- flags = 0
- if not hasLsbArray:
- flags |= 1 << 0
- if not hasLeftSideBearingArray:
- flags |= 1 << 1
-
- data = struct.pack(">B", flags)
-
- advanceWidthArray = array.array(
- "H",
- [
- self.metrics[glyphName][0]
- for i, glyphName in enumerate(glyphOrder)
- if i < numberOfHMetrics
- ]
- )
- if sys.byteorder != "big":
- advanceWidthArray.byteswap()
- data += advanceWidthArray.tobytes()
-
- if hasLsbArray:
- lsbArray = array.array(
- "h",
- [
- self.metrics[glyphName][1]
- for i, glyphName in enumerate(glyphOrder)
- if i < numberOfHMetrics
- ]
- )
- if sys.byteorder != "big":
- lsbArray.byteswap()
- data += lsbArray.tobytes()
-
- if hasLeftSideBearingArray:
- leftSideBearingArray = array.array(
- "h",
- [
- self.metrics[glyphOrder[i]][1]
- for i in range(numberOfHMetrics, len(glyphOrder))
- ]
- )
- if sys.byteorder != "big":
- leftSideBearingArray.byteswap()
- data += leftSideBearingArray.tobytes()
-
- return data
+ def __init__(self, tag=None):
+ self.tableTag = Tag(tag or "hmtx")
+
+ def reconstruct(self, data, ttFont):
+ (flags,) = struct.unpack(">B", data[:1])
+ data = data[1:]
+ if flags & 0b11111100 != 0:
+ raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag)
+
+ # When bit 0 is _not_ set, the lsb[] array is present
+ hasLsbArray = flags & 1 == 0
+ # When bit 1 is _not_ set, the leftSideBearing[] array is present
+ hasLeftSideBearingArray = flags & 2 == 0
+ if hasLsbArray and hasLeftSideBearingArray:
+ raise TTLibError(
+ "either bits 0 or 1 (or both) must set in transformed '%s' flags"
+ % self.tableTag
+ )
+
+ glyfTable = ttFont["glyf"]
+ headerTable = ttFont["hhea"]
+ glyphOrder = glyfTable.glyphOrder
+ numGlyphs = len(glyphOrder)
+ numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs)
+
+ assert len(data) >= 2 * numberOfHMetrics
+ advanceWidthArray = array.array("H", data[: 2 * numberOfHMetrics])
+ if sys.byteorder != "big":
+ advanceWidthArray.byteswap()
+ data = data[2 * numberOfHMetrics :]
+
+ if hasLsbArray:
+ assert len(data) >= 2 * numberOfHMetrics
+ lsbArray = array.array("h", data[: 2 * numberOfHMetrics])
+ if sys.byteorder != "big":
+ lsbArray.byteswap()
+ data = data[2 * numberOfHMetrics :]
+ else:
+ # compute (proportional) glyphs' lsb from their xMin
+ lsbArray = array.array("h")
+ for i, glyphName in enumerate(glyphOrder):
+ if i >= numberOfHMetrics:
+ break
+ glyph = glyfTable[glyphName]
+ xMin = getattr(glyph, "xMin", 0)
+ lsbArray.append(xMin)
+
+ numberOfSideBearings = numGlyphs - numberOfHMetrics
+ if hasLeftSideBearingArray:
+ assert len(data) >= 2 * numberOfSideBearings
+ leftSideBearingArray = array.array("h", data[: 2 * numberOfSideBearings])
+ if sys.byteorder != "big":
+ leftSideBearingArray.byteswap()
+ data = data[2 * numberOfSideBearings :]
+ else:
+ # compute (monospaced) glyphs' leftSideBearing from their xMin
+ leftSideBearingArray = array.array("h")
+ for i, glyphName in enumerate(glyphOrder):
+ if i < numberOfHMetrics:
+ continue
+ glyph = glyfTable[glyphName]
+ xMin = getattr(glyph, "xMin", 0)
+ leftSideBearingArray.append(xMin)
+
+ if data:
+ raise TTLibError("too much '%s' table data" % self.tableTag)
+
+ self.metrics = {}
+ for i in range(numberOfHMetrics):
+ glyphName = glyphOrder[i]
+ advanceWidth, lsb = advanceWidthArray[i], lsbArray[i]
+ self.metrics[glyphName] = (advanceWidth, lsb)
+ lastAdvance = advanceWidthArray[-1]
+ for i in range(numberOfSideBearings):
+ glyphName = glyphOrder[i + numberOfHMetrics]
+ self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i])
+
+ def transform(self, ttFont):
+ glyphOrder = ttFont.getGlyphOrder()
+ glyf = ttFont["glyf"]
+ hhea = ttFont["hhea"]
+ numberOfHMetrics = hhea.numberOfHMetrics
+
+ # check if any of the proportional glyphs has left sidebearings that
+ # differ from their xMin bounding box values.
+ hasLsbArray = False
+ for i in range(numberOfHMetrics):
+ glyphName = glyphOrder[i]
+ lsb = self.metrics[glyphName][1]
+ if lsb != getattr(glyf[glyphName], "xMin", 0):
+ hasLsbArray = True
+ break
+
+ # do the same for the monospaced glyphs (if any) at the end of hmtx table
+ hasLeftSideBearingArray = False
+ for i in range(numberOfHMetrics, len(glyphOrder)):
+ glyphName = glyphOrder[i]
+ lsb = self.metrics[glyphName][1]
+ if lsb != getattr(glyf[glyphName], "xMin", 0):
+ hasLeftSideBearingArray = True
+ break
+
+ # if we need to encode both sidebearings arrays, then no transformation is
+ # applicable, and we must use the untransformed hmtx data
+ if hasLsbArray and hasLeftSideBearingArray:
+ return
+
+ # set bit 0 and 1 when the respective arrays are _not_ present
+ flags = 0
+ if not hasLsbArray:
+ flags |= 1 << 0
+ if not hasLeftSideBearingArray:
+ flags |= 1 << 1
+
+ data = struct.pack(">B", flags)
+
+ advanceWidthArray = array.array(
+ "H",
+ [
+ self.metrics[glyphName][0]
+ for i, glyphName in enumerate(glyphOrder)
+ if i < numberOfHMetrics
+ ],
+ )
+ if sys.byteorder != "big":
+ advanceWidthArray.byteswap()
+ data += advanceWidthArray.tobytes()
+
+ if hasLsbArray:
+ lsbArray = array.array(
+ "h",
+ [
+ self.metrics[glyphName][1]
+ for i, glyphName in enumerate(glyphOrder)
+ if i < numberOfHMetrics
+ ],
+ )
+ if sys.byteorder != "big":
+ lsbArray.byteswap()
+ data += lsbArray.tobytes()
+
+ if hasLeftSideBearingArray:
+ leftSideBearingArray = array.array(
+ "h",
+ [
+ self.metrics[glyphOrder[i]][1]
+ for i in range(numberOfHMetrics, len(glyphOrder))
+ ],
+ )
+ if sys.byteorder != "big":
+ leftSideBearingArray.byteswap()
+ data += leftSideBearingArray.tobytes()
+
+ return data
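To restate the flag semantics shared by reconstruct() and transform() above, here is a small sketch (hypothetical helper, not part of the module): bit 0 set means lsb[] is omitted, bit 1 set means leftSideBearing[] is omitted, and bits 2-7 are reserved.

# Sketch only: decode the transformed 'hmtx' flags byte described above.
def describeHmtxTransformFlags(flags):
    if flags & 0b11111100:
        raise ValueError("bits 2-7 of the transformed 'hmtx' flags are reserved")
    return {
        "lsb[] present": flags & 1 == 0,
        "leftSideBearing[] present": flags & 2 == 0,
    }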
class WOFF2FlavorData(WOFFFlavorData):
-
- Flavor = 'woff2'
-
- def __init__(self, reader=None, data=None, transformedTables=None):
- """Data class that holds the WOFF2 header major/minor version, any
- metadata or private data (as bytes strings), and the set of
- table tags that have transformations applied (if reader is not None),
- or will have once the WOFF2 font is compiled.
-
- Args:
- reader: an SFNTReader (or subclass) object to read flavor data from.
- data: another WOFFFlavorData object to initialise data from.
- transformedTables: set of strings containing table tags to be transformed.
-
- Raises:
- ImportError if the brotli module is not installed.
-
- NOTE: The 'reader' argument, on the one hand, and the 'data' and
- 'transformedTables' arguments, on the other hand, are mutually exclusive.
- """
- if not haveBrotli:
- raise ImportError("No module named brotli")
-
- if reader is not None:
- if data is not None:
- raise TypeError(
- "'reader' and 'data' arguments are mutually exclusive"
- )
- if transformedTables is not None:
- raise TypeError(
- "'reader' and 'transformedTables' arguments are mutually exclusive"
- )
-
- if transformedTables is not None and (
- "glyf" in transformedTables and "loca" not in transformedTables
- or "loca" in transformedTables and "glyf" not in transformedTables
- ):
- raise ValueError(
- "'glyf' and 'loca' must be transformed (or not) together"
- )
- super(WOFF2FlavorData, self).__init__(reader=reader)
- if reader:
- transformedTables = [
- tag
- for tag, entry in reader.tables.items()
- if entry.transformed
- ]
- elif data:
- self.majorVersion = data.majorVersion
- self.majorVersion = data.minorVersion
- self.metaData = data.metaData
- self.privData = data.privData
- if transformedTables is None and hasattr(data, "transformedTables"):
- transformedTables = data.transformedTables
-
- if transformedTables is None:
- transformedTables = woff2TransformedTableTags
-
- self.transformedTables = set(transformedTables)
-
- def _decompress(self, rawData):
- return brotli.decompress(rawData)
+ Flavor = "woff2"
+
+ def __init__(self, reader=None, data=None, transformedTables=None):
+ """Data class that holds the WOFF2 header major/minor version, any
+ metadata or private data (as bytes strings), and the set of
+ table tags that have transformations applied (if reader is not None),
+ or will have once the WOFF2 font is compiled.
+
+ Args:
+ reader: an SFNTReader (or subclass) object to read flavor data from.
+ data: another WOFFFlavorData object to initialise data from.
+ transformedTables: set of strings containing table tags to be transformed.
+
+ Raises:
+ ImportError if the brotli module is not installed.
+
+ NOTE: The 'reader' argument, on the one hand, and the 'data' and
+ 'transformedTables' arguments, on the other hand, are mutually exclusive.
+ """
+ if not haveBrotli:
+ raise ImportError("No module named brotli")
+
+ if reader is not None:
+ if data is not None:
+ raise TypeError("'reader' and 'data' arguments are mutually exclusive")
+ if transformedTables is not None:
+ raise TypeError(
+ "'reader' and 'transformedTables' arguments are mutually exclusive"
+ )
+
+ if transformedTables is not None and (
+ "glyf" in transformedTables
+ and "loca" not in transformedTables
+ or "loca" in transformedTables
+ and "glyf" not in transformedTables
+ ):
+ raise ValueError("'glyf' and 'loca' must be transformed (or not) together")
+ super(WOFF2FlavorData, self).__init__(reader=reader)
+ if reader:
+ transformedTables = [
+ tag for tag, entry in reader.tables.items() if entry.transformed
+ ]
+ elif data:
+ self.majorVersion = data.majorVersion
+ self.minorVersion = data.minorVersion
+ self.metaData = data.metaData
+ self.privData = data.privData
+ if transformedTables is None and hasattr(data, "transformedTables"):
+ transformedTables = data.transformedTables
+
+ if transformedTables is None:
+ transformedTables = woff2TransformedTableTags
+
+ self.transformedTables = set(transformedTables)
+
+ def _decompress(self, rawData):
+ return brotli.decompress(rawData)
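A usage sketch of the class above (file names are placeholders; brotli must be installed): enabling the optional 'hmtx' transform alongside the default 'glyf'/'loca' pair when saving a font as WOFF2.

# Sketch only: "MyFont.ttf" / "MyFont.woff2" are placeholder paths.
from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf", recalcBBoxes=False, recalcTimestamp=False)
font.flavor = "woff2"
font.flavorData = WOFF2FlavorData(transformedTables={"glyf", "loca", "hmtx"})
font.save("MyFont.woff2", reorderTables=False)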
def unpackBase128(data):
- r""" Read one to five bytes from UIntBase128-encoded input string, and return
- a tuple containing the decoded integer plus any leftover data.
-
- >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00")
- True
- >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295
- True
- >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- TTLibError: UIntBase128 value must not start with leading zeros
- >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- TTLibError: UIntBase128-encoded sequence is longer than 5 bytes
- >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- TTLibError: UIntBase128 value exceeds 2**32-1
- """
- if len(data) == 0:
- raise TTLibError('not enough data to unpack UIntBase128')
- result = 0
- if byteord(data[0]) == 0x80:
- # font must be rejected if UIntBase128 value starts with 0x80
- raise TTLibError('UIntBase128 value must not start with leading zeros')
- for i in range(woff2Base128MaxSize):
- if len(data) == 0:
- raise TTLibError('not enough data to unpack UIntBase128')
- code = byteord(data[0])
- data = data[1:]
- # if any of the top seven bits are set then we're about to overflow
- if result & 0xFE000000:
- raise TTLibError('UIntBase128 value exceeds 2**32-1')
- # set current value = old value times 128 bitwise-or (byte bitwise-and 127)
- result = (result << 7) | (code & 0x7f)
- # repeat until the most significant bit of byte is false
- if (code & 0x80) == 0:
- # return result plus left over data
- return result, data
- # make sure not to exceed the size bound
- raise TTLibError('UIntBase128-encoded sequence is longer than 5 bytes')
+ r"""Read one to five bytes from UIntBase128-encoded input string, and return
+ a tuple containing the decoded integer plus any leftover data.
+
+ >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00")
+ True
+ >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295
+ True
+ >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TTLibError: UIntBase128 value must not start with leading zeros
+ >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TTLibError: UIntBase128-encoded sequence is longer than 5 bytes
+ >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TTLibError: UIntBase128 value exceeds 2**32-1
+ """
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack UIntBase128")
+ result = 0
+ if byteord(data[0]) == 0x80:
+ # font must be rejected if UIntBase128 value starts with 0x80
+ raise TTLibError("UIntBase128 value must not start with leading zeros")
+ for i in range(woff2Base128MaxSize):
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack UIntBase128")
+ code = byteord(data[0])
+ data = data[1:]
+ # if any of the top seven bits are set then we're about to overflow
+ if result & 0xFE000000:
+ raise TTLibError("UIntBase128 value exceeds 2**32-1")
+ # set current value = old value times 128 bitwise-or (byte bitwise-and 127)
+ result = (result << 7) | (code & 0x7F)
+ # repeat until the most significant bit of byte is false
+ if (code & 0x80) == 0:
+ # return result plus left over data
+ return result, data
+ # make sure not to exceed the size bound
+ raise TTLibError("UIntBase128-encoded sequence is longer than 5 bytes")
def base128Size(n):
- """ Return the length in bytes of a UIntBase128-encoded sequence with value n.
-
- >>> base128Size(0)
- 1
- >>> base128Size(24567)
- 3
- >>> base128Size(2**32-1)
- 5
- """
- assert n >= 0
- size = 1
- while n >= 128:
- size += 1
- n >>= 7
- return size
+ """Return the length in bytes of a UIntBase128-encoded sequence with value n.
+
+ >>> base128Size(0)
+ 1
+ >>> base128Size(24567)
+ 3
+ >>> base128Size(2**32-1)
+ 5
+ """
+ assert n >= 0
+ size = 1
+ while n >= 128:
+ size += 1
+ n >>= 7
+ return size
def packBase128(n):
- r""" Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of
- bytes using UIntBase128 variable-length encoding. Produce the shortest possible
- encoding.
-
- >>> packBase128(63) == b"\x3f"
- True
- >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f'
- True
- """
- if n < 0 or n >= 2**32:
- raise TTLibError(
- "UIntBase128 format requires 0 <= integer <= 2**32-1")
- data = b''
- size = base128Size(n)
- for i in range(size):
- b = (n >> (7 * (size - i - 1))) & 0x7f
- if i < size - 1:
- b |= 0x80
- data += struct.pack('B', b)
- return data
+ r"""Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of
+ bytes using UIntBase128 variable-length encoding. Produce the shortest possible
+ encoding.
+
+ >>> packBase128(63) == b"\x3f"
+ True
+ >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f'
+ True
+ """
+ if n < 0 or n >= 2**32:
+ raise TTLibError("UIntBase128 format requires 0 <= integer <= 2**32-1")
+ data = b""
+ size = base128Size(n)
+ for i in range(size):
+ b = (n >> (7 * (size - i - 1))) & 0x7F
+ if i < size - 1:
+ b |= 0x80
+ data += struct.pack("B", b)
+ return data
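packBase128() and unpackBase128() above are inverses for every encodable value; a quick round-trip check (illustrative only):

# Sketch only: UIntBase128 round-trip for a few representative values.
for n in (0, 63, 127, 128, 24567, 2**32 - 1):
    encoded = packBase128(n)
    decoded, rest = unpackBase128(encoded)
    assert decoded == n and rest == b"", (n, encoded)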
def unpack255UShort(data):
- """ Read one to three bytes from 255UInt16-encoded input string, and return a
- tuple containing the decoded integer plus any leftover data.
-
- >>> unpack255UShort(bytechr(252))[0]
- 252
-
- Note that some numbers (e.g. 506) can have multiple encodings:
- >>> unpack255UShort(struct.pack("BB", 254, 0))[0]
- 506
- >>> unpack255UShort(struct.pack("BB", 255, 253))[0]
- 506
- >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0]
- 506
- """
- code = byteord(data[:1])
- data = data[1:]
- if code == 253:
- # read two more bytes as an unsigned short
- if len(data) < 2:
- raise TTLibError('not enough data to unpack 255UInt16')
- result, = struct.unpack(">H", data[:2])
- data = data[2:]
- elif code == 254:
- # read another byte, plus 253 * 2
- if len(data) == 0:
- raise TTLibError('not enough data to unpack 255UInt16')
- result = byteord(data[:1])
- result += 506
- data = data[1:]
- elif code == 255:
- # read another byte, plus 253
- if len(data) == 0:
- raise TTLibError('not enough data to unpack 255UInt16')
- result = byteord(data[:1])
- result += 253
- data = data[1:]
- else:
- # leave as is if lower than 253
- result = code
- # return result plus left over data
- return result, data
+ """Read one to three bytes from 255UInt16-encoded input string, and return a
+ tuple containing the decoded integer plus any leftover data.
+
+ >>> unpack255UShort(bytechr(252))[0]
+ 252
+
+ Note that some numbers (e.g. 506) can have multiple encodings:
+ >>> unpack255UShort(struct.pack("BB", 254, 0))[0]
+ 506
+ >>> unpack255UShort(struct.pack("BB", 255, 253))[0]
+ 506
+ >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0]
+ 506
+ """
+ code = byteord(data[:1])
+ data = data[1:]
+ if code == 253:
+ # read two more bytes as an unsigned short
+ if len(data) < 2:
+ raise TTLibError("not enough data to unpack 255UInt16")
+ (result,) = struct.unpack(">H", data[:2])
+ data = data[2:]
+ elif code == 254:
+ # read another byte, plus 253 * 2
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack 255UInt16")
+ result = byteord(data[:1])
+ result += 506
+ data = data[1:]
+ elif code == 255:
+ # read another byte, plus 253
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack 255UInt16")
+ result = byteord(data[:1])
+ result += 253
+ data = data[1:]
+ else:
+ # leave as is if lower than 253
+ result = code
+ # return result plus left over data
+ return result, data
def pack255UShort(value):
- r""" Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring
- using 255UInt16 variable-length encoding.
-
- >>> pack255UShort(252) == b'\xfc'
- True
- >>> pack255UShort(506) == b'\xfe\x00'
- True
- >>> pack255UShort(762) == b'\xfd\x02\xfa'
- True
- """
- if value < 0 or value > 0xFFFF:
- raise TTLibError(
- "255UInt16 format requires 0 <= integer <= 65535")
- if value < 253:
- return struct.pack(">B", value)
- elif value < 506:
- return struct.pack(">BB", 255, value - 253)
- elif value < 762:
- return struct.pack(">BB", 254, value - 506)
- else:
- return struct.pack(">BH", 253, value)
+ r"""Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring
+ using 255UInt16 variable-length encoding.
+
+ >>> pack255UShort(252) == b'\xfc'
+ True
+ >>> pack255UShort(506) == b'\xfe\x00'
+ True
+ >>> pack255UShort(762) == b'\xfd\x02\xfa'
+ True
+ """
+ if value < 0 or value > 0xFFFF:
+ raise TTLibError("255UInt16 format requires 0 <= integer <= 65535")
+ if value < 253:
+ return struct.pack(">B", value)
+ elif value < 506:
+ return struct.pack(">BB", 255, value - 253)
+ elif value < 762:
+ return struct.pack(">BB", 254, value - 506)
+ else:
+ return struct.pack(">BH", 253, value)
def compress(input_file, output_file, transform_tables=None):
- """Compress OpenType font to WOFF2.
+ """Compress OpenType font to WOFF2.
- Args:
- input_file: a file path, file or file-like object (open in binary mode)
- containing an OpenType font (either CFF- or TrueType-flavored).
- output_file: a file path, file or file-like object where to save the
- compressed WOFF2 font.
- transform_tables: Optional[Iterable[str]]: a set of table tags for which
- to enable preprocessing transformations. By default, only 'glyf'
- and 'loca' tables are transformed. An empty set means disable all
- transformations.
- """
- log.info("Processing %s => %s" % (input_file, output_file))
+ Args:
+ input_file: a file path, file or file-like object (open in binary mode)
+ containing an OpenType font (either CFF- or TrueType-flavored).
+ output_file: a file path, file or file-like object where to save the
+ compressed WOFF2 font.
+ transform_tables: Optional[Iterable[str]]: a set of table tags for which
+ to enable preprocessing transformations. By default, only 'glyf'
+ and 'loca' tables are transformed. An empty set means disable all
+ transformations.
+ """
+ log.info("Processing %s => %s" % (input_file, output_file))
- font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
- font.flavor = "woff2"
+ font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
+ font.flavor = "woff2"
- if transform_tables is not None:
- font.flavorData = WOFF2FlavorData(
- data=font.flavorData, transformedTables=transform_tables
- )
+ if transform_tables is not None:
+ font.flavorData = WOFF2FlavorData(
+ data=font.flavorData, transformedTables=transform_tables
+ )
- font.save(output_file, reorderTables=False)
+ font.save(output_file, reorderTables=False)
def decompress(input_file, output_file):
- """Decompress WOFF2 font to OpenType font.
+ """Decompress WOFF2 font to OpenType font.
- Args:
- input_file: a file path, file or file-like object (open in binary mode)
- containing a compressed WOFF2 font.
- output_file: a file path, file or file-like object where to save the
- decompressed OpenType font.
- """
- log.info("Processing %s => %s" % (input_file, output_file))
+ Args:
+ input_file: a file path, file or file-like object (open in binary mode)
+ containing a compressed WOFF2 font.
+ output_file: a file path, file or file-like object where to save the
+ decompressed OpenType font.
+ """
+ log.info("Processing %s => %s" % (input_file, output_file))
- font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
- font.flavor = None
- font.flavorData = None
- font.save(output_file, reorderTables=True)
+ font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
+ font.flavor = None
+ font.flavorData = None
+ font.save(output_file, reorderTables=True)
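A brief example of the two module-level helpers above (file names are placeholders):

# Sketch only: round-trip a TrueType font through WOFF2.
compress("MyFont.ttf", "MyFont.woff2")  # default 'glyf'/'loca' transform
compress("MyFont.ttf", "MyFont.woff2", transform_tables=set())  # disable all transforms
decompress("MyFont.woff2", "MyFont_decompressed.ttf")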
def main(args=None):
- """Compress and decompress WOFF2 fonts"""
- import argparse
- from fontTools import configLogger
- from fontTools.ttx import makeOutputFileName
-
- class _HelpAction(argparse._HelpAction):
-
- def __call__(self, parser, namespace, values, option_string=None):
- subparsers_actions = [
- action for action in parser._actions
- if isinstance(action, argparse._SubParsersAction)]
- for subparsers_action in subparsers_actions:
- for choice, subparser in subparsers_action.choices.items():
- print(subparser.format_help())
- parser.exit()
-
- class _NoGlyfTransformAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string=None):
- namespace.transform_tables.difference_update({"glyf", "loca"})
-
- class _HmtxTransformAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string=None):
- namespace.transform_tables.add("hmtx")
-
- parser = argparse.ArgumentParser(
- prog="fonttools ttLib.woff2",
- description=main.__doc__,
- add_help = False
- )
-
- parser.add_argument('-h', '--help', action=_HelpAction,
- help='show this help message and exit')
-
- parser_group = parser.add_subparsers(title="sub-commands")
- parser_compress = parser_group.add_parser("compress",
- description = "Compress a TTF or OTF font to WOFF2")
- parser_decompress = parser_group.add_parser("decompress",
- description = "Decompress a WOFF2 font to OTF")
-
- for subparser in (parser_compress, parser_decompress):
- group = subparser.add_mutually_exclusive_group(required=False)
- group.add_argument(
- "-v",
- "--verbose",
- action="store_true",
- help="print more messages to console",
- )
- group.add_argument(
- "-q",
- "--quiet",
- action="store_true",
- help="do not print messages to console",
- )
-
- parser_compress.add_argument(
- "input_file",
- metavar="INPUT",
- help="the input OpenType font (.ttf or .otf)",
- )
- parser_decompress.add_argument(
- "input_file",
- metavar="INPUT",
- help="the input WOFF2 font",
- )
-
- parser_compress.add_argument(
- "-o",
- "--output-file",
- metavar="OUTPUT",
- help="the output WOFF2 font",
- )
- parser_decompress.add_argument(
- "-o",
- "--output-file",
- metavar="OUTPUT",
- help="the output OpenType font",
- )
-
- transform_group = parser_compress.add_argument_group()
- transform_group.add_argument(
- "--no-glyf-transform",
- dest="transform_tables",
- nargs=0,
- action=_NoGlyfTransformAction,
- help="Do not transform glyf (and loca) tables",
- )
- transform_group.add_argument(
- "--hmtx-transform",
- dest="transform_tables",
- nargs=0,
- action=_HmtxTransformAction,
- help="Enable optional transformation for 'hmtx' table",
- )
-
- parser_compress.set_defaults(
- subcommand=compress,
- transform_tables={"glyf", "loca"},
- )
- parser_decompress.set_defaults(subcommand=decompress)
-
- options = vars(parser.parse_args(args))
-
- subcommand = options.pop("subcommand", None)
- if not subcommand:
- parser.print_help()
- return
-
- quiet = options.pop("quiet")
- verbose = options.pop("verbose")
- configLogger(
- level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"),
- )
-
- if not options["output_file"]:
- if subcommand is compress:
- extension = ".woff2"
- elif subcommand is decompress:
- # choose .ttf/.otf file extension depending on sfntVersion
- with open(options["input_file"], "rb") as f:
- f.seek(4) # skip 'wOF2' signature
- sfntVersion = f.read(4)
- assert len(sfntVersion) == 4, "not enough data"
- extension = ".otf" if sfntVersion == b"OTTO" else ".ttf"
- else:
- raise AssertionError(subcommand)
- options["output_file"] = makeOutputFileName(
- options["input_file"], outputDir=None, extension=extension
- )
-
- try:
- subcommand(**options)
- except TTLibError as e:
- parser.error(e)
+ """Compress and decompress WOFF2 fonts"""
+ import argparse
+ from fontTools import configLogger
+ from fontTools.ttx import makeOutputFileName
+
+ class _HelpAction(argparse._HelpAction):
+ def __call__(self, parser, namespace, values, option_string=None):
+ subparsers_actions = [
+ action
+ for action in parser._actions
+ if isinstance(action, argparse._SubParsersAction)
+ ]
+ for subparsers_action in subparsers_actions:
+ for choice, subparser in subparsers_action.choices.items():
+ print(subparser.format_help())
+ parser.exit()
+
+ class _NoGlyfTransformAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.transform_tables.difference_update({"glyf", "loca"})
+
+ class _HmtxTransformAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.transform_tables.add("hmtx")
+
+ parser = argparse.ArgumentParser(
+ prog="fonttools ttLib.woff2", description=main.__doc__, add_help=False
+ )
+
+ parser.add_argument(
+ "-h", "--help", action=_HelpAction, help="show this help message and exit"
+ )
+
+ parser_group = parser.add_subparsers(title="sub-commands")
+ parser_compress = parser_group.add_parser(
+ "compress", description="Compress a TTF or OTF font to WOFF2"
+ )
+ parser_decompress = parser_group.add_parser(
+ "decompress", description="Decompress a WOFF2 font to OTF"
+ )
+
+ for subparser in (parser_compress, parser_decompress):
+ group = subparser.add_mutually_exclusive_group(required=False)
+ group.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="print more messages to console",
+ )
+ group.add_argument(
+ "-q",
+ "--quiet",
+ action="store_true",
+ help="do not print messages to console",
+ )
+
+ parser_compress.add_argument(
+ "input_file",
+ metavar="INPUT",
+ help="the input OpenType font (.ttf or .otf)",
+ )
+ parser_decompress.add_argument(
+ "input_file",
+ metavar="INPUT",
+ help="the input WOFF2 font",
+ )
+
+ parser_compress.add_argument(
+ "-o",
+ "--output-file",
+ metavar="OUTPUT",
+ help="the output WOFF2 font",
+ )
+ parser_decompress.add_argument(
+ "-o",
+ "--output-file",
+ metavar="OUTPUT",
+ help="the output OpenType font",
+ )
+
+ transform_group = parser_compress.add_argument_group()
+ transform_group.add_argument(
+ "--no-glyf-transform",
+ dest="transform_tables",
+ nargs=0,
+ action=_NoGlyfTransformAction,
+ help="Do not transform glyf (and loca) tables",
+ )
+ transform_group.add_argument(
+ "--hmtx-transform",
+ dest="transform_tables",
+ nargs=0,
+ action=_HmtxTransformAction,
+ help="Enable optional transformation for 'hmtx' table",
+ )
+
+ parser_compress.set_defaults(
+ subcommand=compress,
+ transform_tables={"glyf", "loca"},
+ )
+ parser_decompress.set_defaults(subcommand=decompress)
+
+ options = vars(parser.parse_args(args))
+
+ subcommand = options.pop("subcommand", None)
+ if not subcommand:
+ parser.print_help()
+ return
+
+ quiet = options.pop("quiet")
+ verbose = options.pop("verbose")
+ configLogger(
+ level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"),
+ )
+
+ if not options["output_file"]:
+ if subcommand is compress:
+ extension = ".woff2"
+ elif subcommand is decompress:
+ # choose .ttf/.otf file extension depending on sfntVersion
+ with open(options["input_file"], "rb") as f:
+ f.seek(4) # skip 'wOF2' signature
+ sfntVersion = f.read(4)
+ assert len(sfntVersion) == 4, "not enough data"
+ extension = ".otf" if sfntVersion == b"OTTO" else ".ttf"
+ else:
+ raise AssertionError(subcommand)
+ options["output_file"] = makeOutputFileName(
+ options["input_file"], outputDir=None, extension=extension
+ )
+
+ try:
+ subcommand(**options)
+ except TTLibError as e:
+ parser.error(e)
if __name__ == "__main__":
- sys.exit(main())
+ sys.exit(main())
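The command-line entry point above can also be driven programmatically; a sketch with placeholder file names:

# Sketch only: equivalent to "fonttools ttLib.woff2 compress -o MyFont.woff2 MyFont.ttf"
main(["compress", "-o", "MyFont.woff2", "MyFont.ttf"])
# Output name is derived via makeOutputFileName when -o is omitted.
main(["decompress", "MyFont.woff2"])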