aboutsummaryrefslogtreecommitdiff
path: root/Lib
diff options
context:
space:
mode:
Diffstat (limited to 'Lib')
-rw-r--r--Lib/fontTools/__init__.py2
-rw-r--r--Lib/fontTools/agl.py2
-rw-r--r--Lib/fontTools/cffLib/__init__.py111
-rw-r--r--Lib/fontTools/colorLib/builder.py207
-rw-r--r--Lib/fontTools/colorLib/geometry.py10
-rw-r--r--Lib/fontTools/colorLib/table_builder.py61
-rw-r--r--Lib/fontTools/colorLib/unbuilder.py16
-rw-r--r--Lib/fontTools/cu2qu/cli.py10
-rw-r--r--Lib/fontTools/designspaceLib/__init__.py38
-rw-r--r--Lib/fontTools/feaLib/__main__.py5
-rw-r--r--Lib/fontTools/feaLib/ast.py111
-rw-r--r--Lib/fontTools/feaLib/builder.py493
-rw-r--r--Lib/fontTools/feaLib/parser.py203
-rw-r--r--Lib/fontTools/feaLib/variableScalar.py97
-rw-r--r--Lib/fontTools/fontBuilder.py16
-rw-r--r--Lib/fontTools/help.py3
-rw-r--r--Lib/fontTools/merge.py1205
-rw-r--r--Lib/fontTools/merge/__init__.py200
-rw-r--r--Lib/fontTools/merge/__main__.py6
-rw-r--r--Lib/fontTools/merge/base.py76
-rw-r--r--Lib/fontTools/merge/cmap.py129
-rw-r--r--Lib/fontTools/merge/layout.py466
-rw-r--r--Lib/fontTools/merge/options.py85
-rw-r--r--Lib/fontTools/merge/tables.py311
-rw-r--r--Lib/fontTools/merge/unicode.py65
-rw-r--r--Lib/fontTools/merge/util.py131
-rw-r--r--Lib/fontTools/misc/arrayTools.py2
-rw-r--r--Lib/fontTools/misc/bezierTools.py22
-rw-r--r--Lib/fontTools/misc/eexec.py2
-rw-r--r--Lib/fontTools/misc/etree.py6
-rw-r--r--Lib/fontTools/misc/fixedTools.py36
-rw-r--r--Lib/fontTools/misc/intTools.py35
-rw-r--r--Lib/fontTools/misc/macCreatorType.py4
-rw-r--r--Lib/fontTools/misc/macRes.py2
-rw-r--r--Lib/fontTools/misc/plistlib/__init__.py4
-rw-r--r--Lib/fontTools/misc/psCharStrings.py2
-rw-r--r--Lib/fontTools/misc/psLib.py3
-rw-r--r--Lib/fontTools/misc/py23.py58
-rw-r--r--Lib/fontTools/misc/roundTools.py47
-rw-r--r--Lib/fontTools/misc/sstruct.py15
-rw-r--r--Lib/fontTools/misc/testTools.py17
-rw-r--r--Lib/fontTools/misc/textTools.py54
-rw-r--r--Lib/fontTools/misc/transform.py93
-rw-r--r--Lib/fontTools/misc/xmlReader.py11
-rw-r--r--Lib/fontTools/misc/xmlWriter.py4
-rw-r--r--Lib/fontTools/otlLib/builder.py129
-rw-r--r--Lib/fontTools/otlLib/optimize/__init__.py68
-rw-r--r--Lib/fontTools/otlLib/optimize/__main__.py6
-rw-r--r--Lib/fontTools/otlLib/optimize/gpos.py439
-rw-r--r--Lib/fontTools/pens/basePen.py14
-rw-r--r--Lib/fontTools/pens/boundsPen.py8
-rw-r--r--Lib/fontTools/pens/cu2quPen.py29
-rw-r--r--Lib/fontTools/pens/freetypePen.py458
-rw-r--r--Lib/fontTools/pens/pointPen.py89
-rw-r--r--Lib/fontTools/pens/recordingPen.py96
-rw-r--r--Lib/fontTools/pens/reportLabPen.py2
-rw-r--r--Lib/fontTools/pens/svgPathPen.py49
-rw-r--r--Lib/fontTools/pens/transformPen.py2
-rw-r--r--Lib/fontTools/pens/ttGlyphPen.py283
-rw-r--r--Lib/fontTools/subset/__init__.py890
-rw-r--r--Lib/fontTools/subset/cff.py146
-rw-r--r--Lib/fontTools/subset/svg.py248
-rw-r--r--Lib/fontTools/subset/util.py25
-rw-r--r--Lib/fontTools/svgLib/path/__init__.py3
-rw-r--r--Lib/fontTools/t1Lib/__init__.py219
-rw-r--r--Lib/fontTools/tfmLib.py460
-rw-r--r--Lib/fontTools/ttLib/__init__.py43
-rw-r--r--Lib/fontTools/ttLib/removeOverlaps.py68
-rw-r--r--Lib/fontTools/ttLib/sfnt.py9
-rw-r--r--Lib/fontTools/ttLib/tables/C_B_D_T_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/C_O_L_R_.py6
-rw-r--r--Lib/fontTools/ttLib/tables/C_P_A_L_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/D_S_I_G_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/DefaultTable.py2
-rw-r--r--Lib/fontTools/ttLib/tables/E_B_D_T_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/E_B_L_C_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/F__e_a_t.py6
-rw-r--r--Lib/fontTools/ttLib/tables/G_M_A_P_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/G_P_K_G_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/M_E_T_A_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/S_I_N_G_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/S_V_G_.py311
-rw-r--r--Lib/fontTools/ttLib/tables/S__i_l_f.py3
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I_V_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I__1.py2
-rw-r--r--Lib/fontTools/ttLib/tables/TupleVariation.py351
-rw-r--r--Lib/fontTools/ttLib/tables/V_O_R_G_.py11
-rw-r--r--Lib/fontTools/ttLib/tables/_a_n_k_r.py15
-rw-r--r--Lib/fontTools/ttLib/tables/_a_v_a_r.py24
-rw-r--r--Lib/fontTools/ttLib/tables/_c_i_d_g.py25
-rw-r--r--Lib/fontTools/ttLib/tables/_c_m_a_p.py129
-rw-r--r--Lib/fontTools/ttLib/tables/_c_v_a_r.py4
-rw-r--r--Lib/fontTools/ttLib/tables/_f_e_a_t.py6
-rw-r--r--Lib/fontTools/ttLib/tables/_f_v_a_r.py3
-rw-r--r--Lib/fontTools/ttLib/tables/_g_l_y_f.py575
-rw-r--r--Lib/fontTools/ttLib/tables/_g_v_a_r.py23
-rw-r--r--Lib/fontTools/ttLib/tables/_h_d_m_x.py2
-rw-r--r--Lib/fontTools/ttLib/tables/_l_t_a_g.py3
-rw-r--r--Lib/fontTools/ttLib/tables/_m_e_t_a.py3
-rw-r--r--Lib/fontTools/ttLib/tables/_n_a_m_e.py41
-rw-r--r--Lib/fontTools/ttLib/tables/_p_o_s_t.py38
-rw-r--r--Lib/fontTools/ttLib/tables/_t_r_a_k.py3
-rw-r--r--Lib/fontTools/ttLib/tables/asciiTable.py2
-rw-r--r--Lib/fontTools/ttLib/tables/otBase.py215
-rw-r--r--Lib/fontTools/ttLib/tables/otConverters.py266
-rwxr-xr-xLib/fontTools/ttLib/tables/otData.py303
-rw-r--r--Lib/fontTools/ttLib/tables/otTables.py438
-rw-r--r--Lib/fontTools/ttLib/tables/ttProgram.py3
-rw-r--r--Lib/fontTools/ttLib/ttCollection.py18
-rw-r--r--Lib/fontTools/ttLib/ttFont.py318
-rw-r--r--Lib/fontTools/ttLib/woff2.py3
-rw-r--r--Lib/fontTools/ttx.py206
-rwxr-xr-xLib/fontTools/ufoLib/__init__.py4
-rw-r--r--Lib/fontTools/ufoLib/filenames.py92
-rwxr-xr-xLib/fontTools/ufoLib/glifLib.py155
-rw-r--r--Lib/fontTools/ufoLib/plistlib.py2
-rw-r--r--Lib/fontTools/unicodedata/Blocks.py778
-rw-r--r--Lib/fontTools/unicodedata/OTTags.py4
-rw-r--r--Lib/fontTools/unicodedata/ScriptExtensions.py58
-rw-r--r--Lib/fontTools/unicodedata/Scripts.py511
-rw-r--r--Lib/fontTools/unicodedata/__init__.py18
-rw-r--r--Lib/fontTools/varLib/__init__.py50
-rw-r--r--Lib/fontTools/varLib/builder.py71
-rw-r--r--Lib/fontTools/varLib/cff.py37
-rw-r--r--Lib/fontTools/varLib/errors.py34
-rw-r--r--Lib/fontTools/varLib/featureVars.py104
-rw-r--r--Lib/fontTools/varLib/instancer/__init__.py71
-rw-r--r--Lib/fontTools/varLib/merger.py78
-rw-r--r--Lib/fontTools/varLib/models.py938
-rw-r--r--Lib/fontTools/varLib/mutator.py10
-rw-r--r--Lib/fontTools/varLib/varStore.py31
131 files changed, 9391 insertions, 5300 deletions
diff --git a/Lib/fontTools/__init__.py b/Lib/fontTools/__init__.py
index 82da9b70..7fa7b304 100644
--- a/Lib/fontTools/__init__.py
+++ b/Lib/fontTools/__init__.py
@@ -3,6 +3,6 @@ from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
-version = __version__ = "4.22.0"
+version = __version__ = "4.31.2"
__all__ = ["version", "log", "configLogger"]
diff --git a/Lib/fontTools/agl.py b/Lib/fontTools/agl.py
index 4f7ff920..cc286e42 100644
--- a/Lib/fontTools/agl.py
+++ b/Lib/fontTools/agl.py
@@ -26,7 +26,7 @@ This is used by fontTools when it has to construct glyph names for a font which
doesn't include any (e.g. format 3.0 post tables).
"""
-from fontTools.misc.py23 import tostr
+from fontTools.misc.textTools import tostr
import re
diff --git a/Lib/fontTools/cffLib/__init__.py b/Lib/fontTools/cffLib/__init__.py
index d4cd7a17..07d0d513 100644
--- a/Lib/fontTools/cffLib/__init__.py
+++ b/Lib/fontTools/cffLib/__init__.py
@@ -11,11 +11,10 @@ the demands of variable fonts. This module parses both original CFF and CFF2.
"""
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin, tobytes, tostr
from fontTools.misc import sstruct
from fontTools.misc import psCharStrings
from fontTools.misc.arrayTools import unionRect, intRect
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr, safeEval
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.otBase import OTTableWriter
from fontTools.ttLib.tables.otBase import OTTableReader
@@ -39,6 +38,85 @@ maxStackLimit = 513
# maxstack operator has been deprecated. max stack is now always 513.
+class StopHintCountEvent(Exception):
+ pass
+
+
+class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
+ stop_hintcount_ops = ("op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto",
+ "op_vmoveto")
+
+ def __init__(self, localSubrs, globalSubrs, private=None):
+ psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs,
+ private)
+
+ def execute(self, charString):
+ self.need_hintcount = True # until proven otherwise
+ for op_name in self.stop_hintcount_ops:
+ setattr(self, op_name, self.stop_hint_count)
+
+ if hasattr(charString, '_desubroutinized'):
+ # If a charstring has already been desubroutinized, we will still
+ # need to execute it if we need to count hints in order to
+ # compute the byte length for mask arguments, and haven't finished
+ # counting hints pairs.
+ if self.need_hintcount and self.callingStack:
+ try:
+ psCharStrings.SimpleT2Decompiler.execute(self, charString)
+ except StopHintCountEvent:
+ del self.callingStack[-1]
+ return
+
+ charString._patches = []
+ psCharStrings.SimpleT2Decompiler.execute(self, charString)
+ desubroutinized = charString.program[:]
+ for idx, expansion in reversed(charString._patches):
+ assert idx >= 2
+ assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1]
+ assert type(desubroutinized[idx - 2]) == int
+ if expansion[-1] == 'return':
+ expansion = expansion[:-1]
+ desubroutinized[idx-2:idx] = expansion
+ if not self.private.in_cff2:
+ if 'endchar' in desubroutinized:
+ # Cut off after first endchar
+ desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1]
+ else:
+ if not len(desubroutinized) or desubroutinized[-1] != 'return':
+ desubroutinized.append('return')
+
+ charString._desubroutinized = desubroutinized
+ del charString._patches
+
+ def op_callsubr(self, index):
+ subr = self.localSubrs[self.operandStack[-1]+self.localBias]
+ psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
+ self.processSubr(index, subr)
+
+ def op_callgsubr(self, index):
+ subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
+ psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
+ self.processSubr(index, subr)
+
+ def stop_hint_count(self, *args):
+ self.need_hintcount = False
+ for op_name in self.stop_hintcount_ops:
+ setattr(self, op_name, None)
+ cs = self.callingStack[-1]
+ if hasattr(cs, '_desubroutinized'):
+ raise StopHintCountEvent()
+
+ def op_hintmask(self, index):
+ psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
+ if self.need_hintcount:
+ self.stop_hint_count()
+
+ def processSubr(self, index, subr):
+ cs = self.callingStack[-1]
+ if not hasattr(cs, '_desubroutinized'):
+ cs._patches.append((index, subr._desubroutinized))
+
+
class CFFFontSet(object):
"""A CFF font "file" can contain more than one font, although this is
extremely rare (and not allowed within OpenType fonts).
@@ -369,6 +447,35 @@ class CFFFontSet(object):
file.seek(0)
self.decompile(file, otFont, isCFF2=True)
+ def desubroutinize(self):
+ for fontName in self.fontNames:
+ font = self[fontName]
+ cs = font.CharStrings
+ for g in font.charset:
+ c, _ = cs.getItemAndSelector(g)
+ c.decompile()
+ subrs = getattr(c.private, "Subrs", [])
+ decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
+ decompiler.execute(c)
+ c.program = c._desubroutinized
+ del c._desubroutinized
+ # Delete all the local subrs
+ if hasattr(font, 'FDArray'):
+ for fd in font.FDArray:
+ pd = fd.Private
+ if hasattr(pd, 'Subrs'):
+ del pd.Subrs
+ if 'Subrs' in pd.rawDict:
+ del pd.rawDict['Subrs']
+ else:
+ pd = font.Private
+ if hasattr(pd, 'Subrs'):
+ del pd.Subrs
+ if 'Subrs' in pd.rawDict:
+ del pd.rawDict['Subrs']
+ # as well as the global subrs
+ self.GlobalSubrs.clear()
+
class CFFWriter(object):
"""Helper class for serializing CFF data to binary. Used by
diff --git a/Lib/fontTools/colorLib/builder.py b/Lib/fontTools/colorLib/builder.py
index 821244af..2577fa76 100644
--- a/Lib/fontTools/colorLib/builder.py
+++ b/Lib/fontTools/colorLib/builder.py
@@ -21,25 +21,16 @@ from typing import (
TypeVar,
Union,
)
+from fontTools.misc.arrayTools import intRect
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.ttLib.tables import C_O_L_R_
from fontTools.ttLib.tables import C_P_A_L_
from fontTools.ttLib.tables import _n_a_m_e
from fontTools.ttLib.tables import otTables as ot
-from fontTools.ttLib.tables.otTables import (
- ExtendMode,
- CompositeMode,
- VariableValue,
- VariableFloat,
- VariableInt,
-)
+from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode
from .errors import ColorLibError
from .geometry import round_start_circle_stable_containment
-from .table_builder import (
- convertTupleClass,
- BuildCallback,
- TableBuilder,
-)
+from .table_builder import BuildCallback, TableBuilder
# TODO move type aliases to colorLib.types?
@@ -49,56 +40,54 @@ _PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]]
_PaintInputList = Sequence[_PaintInput]
_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]]
_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]]
+_ClipBoxInput = Union[
+ Tuple[int, int, int, int, int], # format 1, variable
+ Tuple[int, int, int, int], # format 0, non-variable
+ ot.ClipBox,
+]
MAX_PAINT_COLR_LAYER_COUNT = 255
-_DEFAULT_ALPHA = VariableFloat(1.0)
+_DEFAULT_ALPHA = 1.0
_MAX_REUSE_LEN = 32
-def _beforeBuildPaintVarRadialGradient(paint, source, srcMapFn=lambda v: v):
- # normalize input types (which may or may not specify a varIdx)
- x0 = convertTupleClass(VariableFloat, source["x0"])
- y0 = convertTupleClass(VariableFloat, source["y0"])
- r0 = convertTupleClass(VariableFloat, source["r0"])
- x1 = convertTupleClass(VariableFloat, source["x1"])
- y1 = convertTupleClass(VariableFloat, source["y1"])
- r1 = convertTupleClass(VariableFloat, source["r1"])
+def _beforeBuildPaintRadialGradient(paint, source):
+ x0 = source["x0"]
+ y0 = source["y0"]
+ r0 = source["r0"]
+ x1 = source["x1"]
+ y1 = source["y1"]
+ r1 = source["r1"]
# TODO apparently no builder_test confirms this works (?)
# avoid abrupt change after rounding when c0 is near c1's perimeter
- c = round_start_circle_stable_containment(
- (x0.value, y0.value), r0.value, (x1.value, y1.value), r1.value
- )
- x0, y0 = x0._replace(value=c.centre[0]), y0._replace(value=c.centre[1])
- r0 = r0._replace(value=c.radius)
+ c = round_start_circle_stable_containment((x0, y0), r0, (x1, y1), r1)
+ x0, y0 = c.centre
+ r0 = c.radius
# update source to ensure paint is built with corrected values
- source["x0"] = srcMapFn(x0)
- source["y0"] = srcMapFn(y0)
- source["r0"] = srcMapFn(r0)
- source["x1"] = srcMapFn(x1)
- source["y1"] = srcMapFn(y1)
- source["r1"] = srcMapFn(r1)
+ source["x0"] = x0
+ source["y0"] = y0
+ source["r0"] = r0
+ source["x1"] = x1
+ source["y1"] = y1
+ source["r1"] = r1
return paint, source
-def _beforeBuildPaintRadialGradient(paint, source):
- return _beforeBuildPaintVarRadialGradient(paint, source, lambda v: v.value)
-
-
-def _defaultColorIndex():
- colorIndex = ot.ColorIndex()
- colorIndex.Alpha = _DEFAULT_ALPHA.value
- return colorIndex
+def _defaultColorStop():
+ colorStop = ot.ColorStop()
+ colorStop.Alpha = _DEFAULT_ALPHA
+ return colorStop
-def _defaultVarColorIndex():
- colorIndex = ot.VarColorIndex()
- colorIndex.Alpha = _DEFAULT_ALPHA
- return colorIndex
+def _defaultVarColorStop():
+ colorStop = ot.VarColorStop()
+ colorStop.Alpha = _DEFAULT_ALPHA
+ return colorStop
def _defaultColorLine():
@@ -113,6 +102,12 @@ def _defaultVarColorLine():
return colorLine
+def _defaultPaintSolid():
+ paint = ot.Paint()
+ paint.Alpha = _DEFAULT_ALPHA
+ return paint
+
+
def _buildPaintCallbacks():
return {
(
@@ -124,11 +119,21 @@ def _buildPaintCallbacks():
BuildCallback.BEFORE_BUILD,
ot.Paint,
ot.PaintFormat.PaintVarRadialGradient,
- ): _beforeBuildPaintVarRadialGradient,
- (BuildCallback.CREATE_DEFAULT, ot.ColorIndex): _defaultColorIndex,
- (BuildCallback.CREATE_DEFAULT, ot.VarColorIndex): _defaultVarColorIndex,
+ ): _beforeBuildPaintRadialGradient,
+ (BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop,
+ (BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop,
(BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine,
(BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine,
+ (
+ BuildCallback.CREATE_DEFAULT,
+ ot.Paint,
+ ot.PaintFormat.PaintSolid,
+ ): _defaultPaintSolid,
+ (
+ BuildCallback.CREATE_DEFAULT,
+ ot.Paint,
+ ot.PaintFormat.PaintVarSolid,
+ ): _defaultPaintSolid,
}
@@ -140,11 +145,11 @@ def populateCOLRv0(
"""Build v0 color layers and add to existing COLR table.
Args:
- table: a raw otTables.COLR() object (not ttLib's table_C_O_L_R_).
+ table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``).
colorGlyphsV0: map of base glyph names to lists of (layer glyph names,
- color palette index) tuples.
+ color palette index) tuples. Can be empty.
glyphMap: a map from glyph names to glyph indices, as returned from
- TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
+ ``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID.
"""
if glyphMap is not None:
colorGlyphItems = sorted(
@@ -167,11 +172,14 @@ def populateCOLRv0(
layerRec.PaletteIndex = paletteIndex
layerRecords.append(layerRec)
+ table.BaseGlyphRecordArray = table.LayerRecordArray = None
+ if baseGlyphRecords:
+ table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
+ table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords
+ if layerRecords:
+ table.LayerRecordArray = ot.LayerRecordArray()
+ table.LayerRecordArray.LayerRecord = layerRecords
table.BaseGlyphRecordCount = len(baseGlyphRecords)
- table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
- table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords
- table.LayerRecordArray = ot.LayerRecordArray()
- table.LayerRecordArray.LayerRecord = layerRecords
table.LayerRecordCount = len(layerRecords)
@@ -180,12 +188,16 @@ def buildCOLR(
version: Optional[int] = None,
glyphMap: Optional[Mapping[str, int]] = None,
varStore: Optional[ot.VarStore] = None,
+ varIndexMap: Optional[ot.DeltaSetIndexMap] = None,
+ clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None,
) -> C_O_L_R_.table_C_O_L_R_:
"""Build COLR table from color layers mapping.
+
Args:
+
colorGlyphs: map of base glyph name to, either list of (layer glyph name,
- color palette index) tuples for COLRv0; or a single Paint (dict) or
- list of Paint for COLRv1.
+ color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or
+ list of ``Paint`` for COLRv1.
version: the version of COLR table. If None, the version is determined
by the presence of COLRv1 paints or variation data (varStore), which
require version 1; otherwise, if all base glyphs use only simple color
@@ -193,7 +205,11 @@ def buildCOLR(
glyphMap: a map from glyph names to glyph indices, as returned from
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
varStore: Optional ItemVarationStore for deltas associated with v1 layer.
- Return:
+ varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layer.
+ clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples:
+ (xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase).
+
+ Returns:
A new COLR table.
"""
self = C_O_L_R_.table_C_O_L_R_()
@@ -209,18 +225,13 @@ def buildCOLR(
else:
# unless explicitly requested for v1 or have variations, in which case
# we encode all color glyph as v1
- colorGlyphsV0, colorGlyphsV1 = None, colorGlyphs
+ colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs
colr = ot.COLR()
- if colorGlyphsV0:
- populateCOLRv0(colr, colorGlyphsV0, glyphMap)
- else:
- colr.BaseGlyphRecordCount = colr.LayerRecordCount = 0
- colr.BaseGlyphRecordArray = colr.LayerRecordArray = None
+ populateCOLRv0(colr, colorGlyphsV0, glyphMap)
- if colorGlyphsV1:
- colr.LayerV1List, colr.BaseGlyphV1List = buildColrV1(colorGlyphsV1, glyphMap)
+ colr.LayerList, colr.BaseGlyphList = buildColrV1(colorGlyphsV1, glyphMap)
if version is None:
version = 1 if (varStore or colorGlyphsV1) else 0
@@ -231,12 +242,38 @@ def buildCOLR(
if version == 0:
self.ColorLayers = self._decompileColorLayersV0(colr)
else:
+ clipBoxes = {
+ name: clipBoxes[name] for name in clipBoxes or {} if name in colorGlyphsV1
+ }
+ colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None
+ colr.VarIndexMap = varIndexMap
colr.VarStore = varStore
self.table = colr
return self
+def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList:
+ clipList = ot.ClipList()
+ clipList.Format = 1
+ clipList.clips = {name: buildClipBox(box) for name, box in clipBoxes.items()}
+ return clipList
+
+
+def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox:
+ if isinstance(clipBox, ot.ClipBox):
+ return clipBox
+ n = len(clipBox)
+ clip = ot.ClipBox()
+ if n not in (4, 5):
+ raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}")
+ clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4])
+ clip.Format = int(n == 5) + 1
+ if n == 5:
+ clip.VarIndexBase = int(clipBox[4])
+ return clip
+
+
class ColorPaletteType(enum.IntFlag):
USABLE_WITH_LIGHT_BACKGROUND = 0x0001
USABLE_WITH_DARK_BACKGROUND = 0x0002
@@ -406,15 +443,13 @@ def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]:
yield (lbound, ubound)
-class LayerV1ListBuilder:
- slices: List[ot.Paint]
+class LayerListBuilder:
layers: List[ot.Paint]
reusePool: Mapping[Tuple[Any, ...], int]
tuples: Mapping[int, Tuple[Any, ...]]
keepAlive: List[ot.Paint] # we need id to remain valid
def __init__(self):
- self.slices = []
self.layers = []
self.reusePool = {}
self.tuples = {}
@@ -459,10 +494,6 @@ class LayerV1ListBuilder:
# COLR layers is unusual in that it modifies shared state
# so we need a callback into an object
def _beforeBuildPaintColrLayers(self, dest, source):
- paint = ot.Paint()
- paint.Format = int(ot.PaintFormat.PaintColrLayers)
- self.slices.append(paint)
-
# Sketchy gymnastics: a sequence input will have dropped it's layers
# into NumLayers; get it back
if isinstance(source.get("NumLayers", None), collections.abc.Sequence):
@@ -520,6 +551,12 @@ class LayerV1ListBuilder:
layers = [listToColrLayers(l) for l in layers]
+ # No reason to have a colr layers with just one entry
+ if len(layers) == 1:
+ return layers[0], {}
+
+ paint = ot.Paint()
+ paint.Format = int(ot.PaintFormat.PaintColrLayers)
paint.NumLayers = len(layers)
paint.FirstLayerIndex = len(self.layers)
self.layers.extend(layers)
@@ -538,17 +575,19 @@ class LayerV1ListBuilder:
def buildPaint(self, paint: _PaintInput) -> ot.Paint:
return self.tableBuilder.build(ot.Paint, paint)
- def build(self) -> ot.LayerV1List:
- layers = ot.LayerV1List()
+ def build(self) -> Optional[ot.LayerList]:
+ if not self.layers:
+ return None
+ layers = ot.LayerList()
layers.LayerCount = len(self.layers)
layers.Paint = self.layers
return layers
-def buildBaseGlyphV1Record(
- baseGlyph: str, layerBuilder: LayerV1ListBuilder, paint: _PaintInput
-) -> ot.BaseGlyphV1List:
- self = ot.BaseGlyphV1Record()
+def buildBaseGlyphPaintRecord(
+ baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput
+) -> ot.BaseGlyphList:
+ self = ot.BaseGlyphPaintRecord()
self.BaseGlyph = baseGlyph
self.Paint = layerBuilder.buildPaint(paint)
return self
@@ -564,7 +603,7 @@ def _format_glyph_errors(errors: Mapping[str, Exception]) -> str:
def buildColrV1(
colorGlyphs: _ColorGlyphsDict,
glyphMap: Optional[Mapping[str, int]] = None,
-) -> Tuple[ot.LayerV1List, ot.BaseGlyphV1List]:
+) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]:
if glyphMap is not None:
colorGlyphItems = sorted(
colorGlyphs.items(), key=lambda item: glyphMap[item[0]]
@@ -574,24 +613,24 @@ def buildColrV1(
errors = {}
baseGlyphs = []
- layerBuilder = LayerV1ListBuilder()
+ layerBuilder = LayerListBuilder()
for baseGlyph, paint in colorGlyphItems:
try:
- baseGlyphs.append(buildBaseGlyphV1Record(baseGlyph, layerBuilder, paint))
+ baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint))
except (ColorLibError, OverflowError, ValueError, TypeError) as e:
errors[baseGlyph] = e
if errors:
failed_glyphs = _format_glyph_errors(errors)
- exc = ColorLibError(f"Failed to build BaseGlyphV1List:\n{failed_glyphs}")
+ exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}")
exc.errors = errors
raise exc from next(iter(errors.values()))
layers = layerBuilder.build()
- glyphs = ot.BaseGlyphV1List()
+ glyphs = ot.BaseGlyphList()
glyphs.BaseGlyphCount = len(baseGlyphs)
- glyphs.BaseGlyphV1Record = baseGlyphs
+ glyphs.BaseGlyphPaintRecord = baseGlyphs
return (layers, glyphs)
diff --git a/Lib/fontTools/colorLib/geometry.py b/Lib/fontTools/colorLib/geometry.py
index e62aead1..1ce161bf 100644
--- a/Lib/fontTools/colorLib/geometry.py
+++ b/Lib/fontTools/colorLib/geometry.py
@@ -1,6 +1,6 @@
"""Helpers for manipulating 2D points and vectors in COLR table."""
-from math import copysign, cos, hypot, pi
+from math import copysign, cos, hypot, isclose, pi
from fontTools.misc.roundTools import otRound
@@ -19,9 +19,7 @@ def _unit_vector(vec):
return (vec[0] / length, vec[1] / length)
-# This is the same tolerance used by Skia's SkTwoPointConicalGradient.cpp to detect
-# when a radial gradient's focal point lies on the end circle.
-_NEARLY_ZERO = 1 / (1 << 12) # 0.000244140625
+_CIRCLE_INSIDE_TOLERANCE = 1e-4
# The unit vector's X and Y components are respectively
@@ -64,10 +62,10 @@ class Circle:
def round(self):
return Circle(_round_point(self.centre), otRound(self.radius))
- def inside(self, outer_circle):
+ def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
return (
- abs(outer_circle.radius - dist) <= _NEARLY_ZERO
+ isclose(outer_circle.radius, dist, rel_tol=_CIRCLE_INSIDE_TOLERANCE)
or outer_circle.radius > dist
)
diff --git a/Lib/fontTools/colorLib/table_builder.py b/Lib/fontTools/colorLib/table_builder.py
index 6fba6b0f..763115b9 100644
--- a/Lib/fontTools/colorLib/table_builder.py
+++ b/Lib/fontTools/colorLib/table_builder.py
@@ -17,10 +17,9 @@ from fontTools.ttLib.tables.otConverters import (
Short,
UInt8,
UShort,
- VarInt16,
- VarUInt16,
IntValue,
FloatValue,
+ OptionalValue,
)
from fontTools.misc.roundTools import otRound
@@ -39,7 +38,7 @@ class BuildCallback(enum.Enum):
"""
AFTER_BUILD = enum.auto()
- """Keyed on (CREATE_DEFAULT, class).
+ """Keyed on (CREATE_DEFAULT, class[, Format if available]).
Receives no arguments.
Should return a new instance of class.
"""
@@ -50,37 +49,29 @@ def _assignable(convertersByName):
return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)}
-def convertTupleClass(tupleClass, value):
- if isinstance(value, tupleClass):
- return value
- if isinstance(value, tuple):
- return tupleClass(*value)
- return tupleClass(value)
-
-
def _isNonStrSequence(value):
return isinstance(value, collections.abc.Sequence) and not isinstance(value, str)
-def _set_format(dest, source):
+def _split_format(cls, source):
if _isNonStrSequence(source):
- assert len(source) > 0, f"{type(dest)} needs at least format from {source}"
- dest.Format = source[0]
- source = source[1:]
+ assert len(source) > 0, f"{cls} needs at least format from {source}"
+ fmt, remainder = source[0], source[1:]
elif isinstance(source, collections.abc.Mapping):
- assert "Format" in source, f"{type(dest)} needs at least Format from {source}"
- dest.Format = source["Format"]
+ assert "Format" in source, f"{cls} needs at least Format from {source}"
+ remainder = source.copy()
+ fmt = remainder.pop("Format")
else:
- raise ValueError(f"Not sure how to populate {type(dest)} from {source}")
+ raise ValueError(f"Not sure how to populate {cls} from {source}")
assert isinstance(
- dest.Format, collections.abc.Hashable
- ), f"{type(dest)} Format is not hashable: {dest.Format}"
+ fmt, collections.abc.Hashable
+ ), f"{cls} Format is not hashable: {fmt!r}"
assert (
- dest.Format in dest.convertersByName
- ), f"{dest.Format} invalid Format of {cls}"
+ fmt in cls.convertersByName
+ ), f"{cls} invalid Format: {fmt!r}"
- return source
+ return fmt, remainder
class TableBuilder:
@@ -97,13 +88,9 @@ class TableBuilder:
self._callbackTable = callbackTable
def _convert(self, dest, field, converter, value):
- tupleClass = getattr(converter, "tupleClass", None)
enumClass = getattr(converter, "enumClass", None)
- if tupleClass:
- value = convertTupleClass(tupleClass, value)
-
- elif enumClass:
+ if enumClass:
if isinstance(value, enumClass):
pass
elif isinstance(value, str):
@@ -140,6 +127,11 @@ class TableBuilder:
return source
callbackKey = (cls,)
+ fmt = None
+ if issubclass(cls, FormatSwitchingBaseTable):
+ fmt, source = _split_format(cls, source)
+ callbackKey = (cls, fmt)
+
dest = self._callbackTable.get(
(BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls()
)()
@@ -150,11 +142,9 @@ class TableBuilder:
# For format switchers we need to resolve converters based on format
if issubclass(cls, FormatSwitchingBaseTable):
- source = _set_format(dest, source)
-
+ dest.Format = fmt
convByName = _assignable(convByName[dest.Format])
skippedFields.add("Format")
- callbackKey = (cls, dest.Format)
# Convert sequence => mapping so before thunk only has to handle one format
if _isNonStrSequence(source):
@@ -182,6 +172,10 @@ class TableBuilder:
# let's try as a 1-tuple
dest = self.build(cls, (source,))
+ for field, conv in convByName.items():
+ if not hasattr(dest, field) and isinstance(conv, OptionalValue):
+ setattr(dest, field, conv.DEFAULT)
+
dest = self._callbackTable.get(
(BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d
)(dest)
@@ -210,11 +204,8 @@ class TableUnbuilder:
continue
value = getattr(table, converter.name)
- tupleClass = getattr(converter, "tupleClass", None)
enumClass = getattr(converter, "enumClass", None)
- if tupleClass:
- source[converter.name] = tuple(value)
- elif enumClass:
+ if enumClass:
source[converter.name] = value.name.lower()
elif isinstance(converter, Struct):
if converter.repeat:
diff --git a/Lib/fontTools/colorLib/unbuilder.py b/Lib/fontTools/colorLib/unbuilder.py
index 43582bde..03458907 100644
--- a/Lib/fontTools/colorLib/unbuilder.py
+++ b/Lib/fontTools/colorLib/unbuilder.py
@@ -2,11 +2,14 @@ from fontTools.ttLib.tables import otTables as ot
from .table_builder import TableUnbuilder
-def unbuildColrV1(layerV1List, baseGlyphV1List):
- unbuilder = LayerV1ListUnbuilder(layerV1List.Paint)
+def unbuildColrV1(layerList, baseGlyphList):
+ layers = []
+ if layerList:
+ layers = layerList.Paint
+ unbuilder = LayerListUnbuilder(layers)
return {
rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
- for rec in baseGlyphV1List.BaseGlyphV1Record
+ for rec in baseGlyphList.BaseGlyphPaintRecord
}
@@ -18,7 +21,7 @@ def _flatten(lst):
yield el
-class LayerV1ListUnbuilder:
+class LayerListUnbuilder:
def __init__(self, layers):
self.layers = layers
@@ -71,9 +74,8 @@ if __name__ == "__main__":
sys.exit(f"error: No COLR table version=1 found in {fontfile}")
colorGlyphs = unbuildColrV1(
- colr.table.LayerV1List,
- colr.table.BaseGlyphV1List,
- ignoreVarIdx=not colr.table.VarStore,
+ colr.table.LayerList,
+ colr.table.BaseGlyphList,
)
pprint(colorGlyphs)
diff --git a/Lib/fontTools/cu2qu/cli.py b/Lib/fontTools/cu2qu/cli.py
index d4e83b88..34520fc0 100644
--- a/Lib/fontTools/cu2qu/cli.py
+++ b/Lib/fontTools/cu2qu/cli.py
@@ -29,8 +29,14 @@ def _cpu_count():
return 1
+def open_ufo(path):
+ if hasattr(ufo_module.Font, "open"): # ufoLib2
+ return ufo_module.Font.open(path)
+ return ufo_module.Font(path) # defcon
+
+
def _font_to_quadratic(input_path, output_path=None, **kwargs):
- ufo = ufo_module.Font(input_path)
+ ufo = open_ufo(input_path)
logger.info('Converting curves for %s', input_path)
if font_to_quadratic(ufo, **kwargs):
logger.info("Saving %s", output_path)
@@ -152,7 +158,7 @@ def main(args=None):
if options.interpolatable:
logger.info('Converting curves compatibly')
- ufos = [ufo_module.Font(infile) for infile in options.infiles]
+ ufos = [open_ufo(infile) for infile in options.infiles]
if fonts_to_quadratic(ufos, **kwargs):
for ufo, output_path in zip(ufos, output_paths):
logger.info("Saving %s", output_path)
diff --git a/Lib/fontTools/designspaceLib/__init__.py b/Lib/fontTools/designspaceLib/__init__.py
index 9ea22fe6..4b706827 100644
--- a/Lib/fontTools/designspaceLib/__init__.py
+++ b/Lib/fontTools/designspaceLib/__init__.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-from fontTools.misc.py23 import tobytes, tostr
from fontTools.misc.loggingTools import LogMixin
+from fontTools.misc.textTools import tobytes, tostr
import collections
from io import BytesIO, StringIO
import os
@@ -33,6 +33,9 @@ def posix(path):
if path.startswith('/'):
# The above transformation loses absolute paths
new_path = '/' + new_path
+ elif path.startswith(r'\\'):
+ # The above transformation loses leading slashes of UNC path mounts
+ new_path = '//' + new_path
return new_path
@@ -157,18 +160,22 @@ class SourceDescriptor(SimpleDescriptor):
class RuleDescriptor(SimpleDescriptor):
- """<!-- optional: list of substitution rules -->
- <rules>
- <rule name="vertical.bars">
- <conditionset>
- <condition minimum="250.000000" maximum="750.000000" name="weight"/>
- <condition minimum="100" name="width"/>
- <condition minimum="10" maximum="40" name="optical"/>
- </conditionset>
- <sub name="cent" with="cent.alt"/>
- <sub name="dollar" with="dollar.alt"/>
- </rule>
- </rules>
+ """Represents the rule descriptor element
+
+ .. code-block:: xml
+
+ <!-- optional: list of substitution rules -->
+ <rules>
+ <rule name="vertical.bars">
+ <conditionset>
+ <condition minimum="250.000000" maximum="750.000000" name="weight"/>
+ <condition minimum="100" name="width"/>
+ <condition minimum="10" maximum="40" name="optical"/>
+ </conditionset>
+ <sub name="cent" with="cent.alt"/>
+ <sub name="dollar" with="dollar.alt"/>
+ </rule>
+ </rules>
"""
_attrs = ['name', 'conditionSets', 'subs'] # what do we need here
@@ -993,7 +1000,10 @@ class BaseDocReader(LogMixin):
def readGlyphElement(self, glyphElement, instanceObject):
"""
- Read the glyph element.
+ Read the glyph element:
+
+ .. code-block:: xml
+
<glyph name="b" unicode="0x62"/>
<glyph name="b"/>
<glyph name="b">
diff --git a/Lib/fontTools/feaLib/__main__.py b/Lib/fontTools/feaLib/__main__.py
index 99c64231..a45230e8 100644
--- a/Lib/fontTools/feaLib/__main__.py
+++ b/Lib/fontTools/feaLib/__main__.py
@@ -12,7 +12,7 @@ log = logging.getLogger("fontTools.feaLib")
def main(args=None):
- """Add features from a feature file (.fea) into a OTF font"""
+ """Add features from a feature file (.fea) into an OTF font"""
parser = argparse.ArgumentParser(
description="Use fontTools to compile OpenType feature files (*.fea)."
)
@@ -46,7 +46,7 @@ def main(args=None):
parser.add_argument(
"-v",
"--verbose",
- help="increase the logger verbosity. Multiple -v " "options are allowed.",
+ help="Increase the logger verbosity. Multiple -v " "options are allowed.",
action="count",
default=0,
)
@@ -70,6 +70,7 @@ def main(args=None):
if options.traceback:
raise
log.error(e)
+ sys.exit(1)
font.save(output_font)
diff --git a/Lib/fontTools/feaLib/ast.py b/Lib/fontTools/feaLib/ast.py
index 763d0d2c..1273343d 100644
--- a/Lib/fontTools/feaLib/ast.py
+++ b/Lib/fontTools/feaLib/ast.py
@@ -1,7 +1,7 @@
-from fontTools.misc.py23 import byteord, tobytes
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.location import FeatureLibLocation
from fontTools.misc.encodingTools import getEncoding
+from fontTools.misc.textTools import byteord, tobytes
from collections import OrderedDict
import itertools
@@ -34,6 +34,7 @@ __all__ = [
"ChainContextPosStatement",
"ChainContextSubstStatement",
"CharacterStatement",
+ "ConditionsetStatement",
"CursivePosStatement",
"ElidedFallbackName",
"ElidedFallbackNameID",
@@ -700,7 +701,7 @@ class AttachStatement(Statement):
class ChainContextPosStatement(Statement):
- """A chained contextual positioning statement.
+ r"""A chained contextual positioning statement.
``prefix``, ``glyphs``, and ``suffix`` should be lists of
`glyph-containing objects`_ .
@@ -758,7 +759,7 @@ class ChainContextPosStatement(Statement):
class ChainContextSubstStatement(Statement):
- """A chained contextual substitution statement.
+ r"""A chained contextual substitution statement.
``prefix``, ``glyphs``, and ``suffix`` should be lists of
`glyph-containing objects`_ .
@@ -1258,9 +1259,25 @@ class MultipleSubstStatement(Statement):
"""Calls the builder object's ``add_multiple_subst`` callback."""
prefix = [p.glyphSet() for p in self.prefix]
suffix = [s.glyphSet() for s in self.suffix]
- builder.add_multiple_subst(
- self.location, prefix, self.glyph, suffix, self.replacement, self.forceChain
- )
+ if not self.replacement and hasattr(self.glyph, "glyphSet"):
+ for glyph in self.glyph.glyphSet():
+ builder.add_multiple_subst(
+ self.location,
+ prefix,
+ glyph,
+ suffix,
+ self.replacement,
+ self.forceChain,
+ )
+ else:
+ builder.add_multiple_subst(
+ self.location,
+ prefix,
+ self.glyph,
+ suffix,
+ self.replacement,
+ self.forceChain,
+ )
def asFea(self, indent=""):
res = "sub "
@@ -1314,10 +1331,16 @@ class PairPosStatement(Statement):
"""
if self.enumerated:
g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()]
+ seen_pair = False
for glyph1, glyph2 in itertools.product(*g):
+ seen_pair = True
builder.add_specific_pair_pos(
self.location, glyph1, self.valuerecord1, glyph2, self.valuerecord2
)
+ if not seen_pair:
+ raise FeatureLibError(
+ "Empty glyph class in positioning rule", self.location
+ )
return
is_specific = isinstance(self.glyphs1, GlyphName) and isinstance(
@@ -2027,3 +2050,79 @@ class AxisValueLocationStatement(Statement):
res += f"location {self.tag} "
res += f"{' '.join(str(i) for i in self.values)};\n"
return res
+
+
+class ConditionsetStatement(Statement):
+ """
+ A variable layout conditionset
+
+ Args:
+ name (str): the name of this conditionset
+ conditions (dict): a dictionary mapping axis tags to a
+ tuple of (min,max) userspace coordinates.
+ """
+
+ def __init__(self, name, conditions, location=None):
+ Statement.__init__(self, location)
+ self.name = name
+ self.conditions = conditions
+
+ def build(self, builder):
+ builder.add_conditionset(self.name, self.conditions)
+
+ def asFea(self, res="", indent=""):
+ res += indent + f"conditionset {self.name} " + "{\n"
+ for tag, (minvalue, maxvalue) in self.conditions.items():
+ res += indent + SHIFT + f"{tag} {minvalue} {maxvalue};\n"
+ res += indent + "}" + f" {self.name};\n"
+ return res
+
+
+class VariationBlock(Block):
+ """A variation feature block, applicable in a given set of conditions."""
+
+ def __init__(self, name, conditionset, use_extension=False, location=None):
+ Block.__init__(self, location)
+ self.name, self.conditionset, self.use_extension = (
+ name,
+ conditionset,
+ use_extension,
+ )
+
+ def build(self, builder):
+ """Call the ``start_feature`` callback on the builder object, visit
+ all the statements in this feature, and then call ``end_feature``."""
+ builder.start_feature(self.location, self.name)
+ if (
+ self.conditionset != "NULL"
+ and self.conditionset not in builder.conditionsets_
+ ):
+ raise FeatureLibError(
+ f"variation block used undefined conditionset {self.conditionset}",
+ self.location,
+ )
+
+ # language exclude_dflt statements modify builder.features_
+ # limit them to this block with temporary builder.features_
+ features = builder.features_
+ builder.features_ = {}
+ Block.build(self, builder)
+ for key, value in builder.features_.items():
+ items = builder.feature_variations_.setdefault(key, {}).setdefault(
+ self.conditionset, []
+ )
+ items.extend(value)
+ if key not in features:
+ features[key] = [] # Ensure we make a feature record
+ builder.features_ = features
+ builder.end_feature()
+
+ def asFea(self, indent=""):
+ res = indent + "variation %s " % self.name.strip()
+ res += self.conditionset + " "
+ if self.use_extension:
+ res += "useExtension "
+ res += "{\n"
+ res += Block.asFea(self, indent=indent)
+ res += indent + "} %s;\n" % self.name.strip()
+ return res
diff --git a/Lib/fontTools/feaLib/builder.py b/Lib/fontTools/feaLib/builder.py
index 4a7d9575..a1644875 100644
--- a/Lib/fontTools/feaLib/builder.py
+++ b/Lib/fontTools/feaLib/builder.py
@@ -1,6 +1,5 @@
-from fontTools.misc.py23 import Tag, tostr
from fontTools.misc import sstruct
-from fontTools.misc.textTools import binary2num, safeEval
+from fontTools.misc.textTools import Tag, tostr, binary2num, safeEval
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lookupDebugInfo import (
LookupDebugInfo,
@@ -9,6 +8,7 @@ from fontTools.feaLib.lookupDebugInfo import (
)
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.ast import FeatureFile
+from fontTools.feaLib.variableScalar import VariableScalar
from fontTools.otlLib import builder as otl
from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.ttLib import newTable, getTableModule
@@ -31,6 +31,10 @@ from fontTools.otlLib.builder import (
ChainContextualRule,
)
from fontTools.otlLib.error import OpenTypeLibError
+from fontTools.varLib.varStore import OnlineVarStoreBuilder
+from fontTools.varLib.builder import buildVarDevTable
+from fontTools.varLib.featureVars import addFeatureVariationsRaw
+from fontTools.varLib.models import normalizeValue
from collections import defaultdict
import itertools
from io import StringIO
@@ -112,6 +116,12 @@ class Builder(object):
else:
self.parseTree, self.file = None, featurefile
self.glyphMap = font.getReverseGlyphMap()
+ self.varstorebuilder = None
+ if "fvar" in font:
+ self.axes = font["fvar"].axes
+ self.varstorebuilder = OnlineVarStoreBuilder(
+ [ax.axisTag for ax in self.axes]
+ )
self.default_language_systems_ = set()
self.script_ = None
self.lookupflag_ = 0
@@ -126,6 +136,7 @@ class Builder(object):
self.lookup_locations = {"GSUB": {}, "GPOS": {}}
self.features_ = {} # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*]
self.required_features_ = {} # ('latn', 'DEU ') --> 'scmp'
+ self.feature_variations_ = {}
# for feature 'aalt'
self.aalt_features_ = [] # [(location, featureName)*], for 'aalt'
self.aalt_location_ = None
@@ -163,6 +174,8 @@ class Builder(object):
self.vhea_ = {}
# for table 'STAT'
self.stat_ = {}
+ # for conditionsets
+ self.conditionsets_ = {}
def build(self, tables=None, debug=False):
if self.parseTree is None:
@@ -198,6 +211,8 @@ class Builder(object):
if tag not in tables:
continue
table = self.makeTable(tag)
+ if self.feature_variations_:
+ self.makeFeatureVariations(table, tag)
if (
table.ScriptList.ScriptCount > 0
or table.FeatureList.FeatureCount > 0
@@ -215,6 +230,8 @@ class Builder(object):
self.font["GDEF"] = gdef
elif "GDEF" in self.font:
del self.font["GDEF"]
+ elif self.varstorebuilder:
+ raise FeatureLibError("Must save GDEF when compiling a variable font", None)
if "BASE" in tables:
base = self.buildBASE()
if base:
@@ -745,6 +762,16 @@ class Builder(object):
gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_()
gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_()
gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000
+ if self.varstorebuilder:
+ store = self.varstorebuilder.finish()
+ if store.VarData:
+ gdef.Version = 0x00010003
+ gdef.VarStore = store
+ varidx_map = store.optimize()
+
+ gdef.remap_device_varidxes(varidx_map)
+ if 'GPOS' in self.font:
+ self.font['GPOS'].table.remap_device_varidxes(varidx_map)
if any(
(
gdef.GlyphClassDef,
@@ -753,7 +780,7 @@ class Builder(object):
gdef.MarkAttachClassDef,
gdef.MarkGlyphSetsDef,
)
- ):
+ ) or hasattr(gdef, "VarStore"):
result = newTable("GDEF")
result.table = gdef
return result
@@ -849,7 +876,8 @@ class Builder(object):
)
size_feature = tag == "GPOS" and feature_tag == "size"
- if len(lookup_indices) == 0 and not size_feature:
+ force_feature = self.any_feature_variations(feature_tag, tag)
+ if len(lookup_indices) == 0 and not size_feature and not force_feature:
continue
for ix in lookup_indices:
@@ -915,6 +943,42 @@ class Builder(object):
table.LookupList.LookupCount = len(table.LookupList.Lookup)
return table
+ def makeFeatureVariations(self, table, table_tag):
+ feature_vars = {}
+ has_any_variations = False
+ # Sort out which lookups to build, gather their indices
+ for (
+ script_,
+ language,
+ feature_tag,
+ ), variations in self.feature_variations_.items():
+ feature_vars[feature_tag] = []
+ for conditionset, builders in variations.items():
+ raw_conditionset = self.conditionsets_[conditionset]
+ indices = []
+ for b in builders:
+ if b.table != table_tag:
+ continue
+ assert b.lookup_index is not None
+ indices.append(b.lookup_index)
+ has_any_variations = True
+ feature_vars[feature_tag].append((raw_conditionset, indices))
+
+ if has_any_variations:
+ for feature_tag, conditions_and_lookups in feature_vars.items():
+ addFeatureVariationsRaw(
+ self.font, table, conditions_and_lookups, feature_tag
+ )
+
+ def any_feature_variations(self, feature_tag, table_tag):
+ for (_, _, feature), variations in self.feature_variations_.items():
+ if feature != feature_tag:
+ continue
+ for conditionset, builders in variations.items():
+ if any(b.table == table_tag for b in builders):
+ return True
+ return False
+
def get_lookup_name_(self, lookup):
rev = {v: k for k, v in self.named_lookups_.items()}
if lookup in rev:
@@ -1005,7 +1069,8 @@ class Builder(object):
assert lookup_name in self.named_lookups_, lookup_name
self.cur_lookup_ = None
lookup = self.named_lookups_[lookup_name]
- self.add_lookup_to_feature_(lookup, self.cur_feature_name_)
+ if lookup is not None: # skip empty named lookup
+ self.add_lookup_to_feature_(lookup, self.cur_feature_name_)
def set_font_revision(self, location, revision):
self.fontRevision_ = revision
@@ -1130,39 +1195,6 @@ class Builder(object):
for glyph in glyphs:
self.attachPoints_.setdefault(glyph, set()).update(contourPoints)
- def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
- lookup = self.get_lookup_(location, ChainContextPosBuilder)
- lookup.rules.append(
- ChainContextualRule(
- prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
- )
- )
-
- def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
- lookup = self.get_lookup_(location, ChainContextSubstBuilder)
- lookup.rules.append(
- ChainContextualRule(
- prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
- )
- )
-
- def add_alternate_subst(self, location, prefix, glyph, suffix, replacement):
- if self.cur_feature_name_ == "aalt":
- alts = self.aalt_alternates_.setdefault(glyph, set())
- alts.update(replacement)
- return
- if prefix or suffix:
- chain = self.get_lookup_(location, ChainContextSubstBuilder)
- lookup = self.get_chained_lookup_(location, AlternateSubstBuilder)
- chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [lookup]))
- else:
- lookup = self.get_lookup_(location, AlternateSubstBuilder)
- if glyph in lookup.alternates:
- raise FeatureLibError(
- 'Already defined alternates for glyph "%s"' % glyph, location
- )
- lookup.alternates[glyph] = replacement
-
def add_feature_reference(self, location, featureName):
if self.cur_feature_name_ != "aalt":
raise FeatureLibError(
@@ -1207,24 +1239,38 @@ class Builder(object):
key = (script, lang, self.cur_feature_name_)
self.features_.setdefault(key, [])
- def add_ligature_subst(
- self, location, prefix, glyphs, suffix, replacement, forceChain
- ):
- if prefix or suffix or forceChain:
- chain = self.get_lookup_(location, ChainContextSubstBuilder)
- lookup = self.get_chained_lookup_(location, LigatureSubstBuilder)
- chain.rules.append(ChainContextualRule(prefix, glyphs, suffix, [lookup]))
- else:
- lookup = self.get_lookup_(location, LigatureSubstBuilder)
+ # GSUB rules
- # OpenType feature file syntax, section 5.d, "Ligature substitution":
- # "Since the OpenType specification does not allow ligature
- # substitutions to be specified on target sequences that contain
- # glyph classes, the implementation software will enumerate
- # all specific glyph sequences if glyph classes are detected"
- for g in sorted(itertools.product(*glyphs)):
- lookup.ligatures[g] = replacement
+ # GSUB 1
+ def add_single_subst(self, location, prefix, suffix, mapping, forceChain):
+ if self.cur_feature_name_ == "aalt":
+ for (from_glyph, to_glyph) in mapping.items():
+ alts = self.aalt_alternates_.setdefault(from_glyph, set())
+ alts.add(to_glyph)
+ return
+ if prefix or suffix or forceChain:
+ self.add_single_subst_chained_(location, prefix, suffix, mapping)
+ return
+ lookup = self.get_lookup_(location, SingleSubstBuilder)
+ for (from_glyph, to_glyph) in mapping.items():
+ if from_glyph in lookup.mapping:
+ if to_glyph == lookup.mapping[from_glyph]:
+ log.info(
+ "Removing duplicate single substitution from glyph"
+ ' "%s" to "%s" at %s',
+ from_glyph,
+ to_glyph,
+ location,
+ )
+ else:
+ raise FeatureLibError(
+ 'Already defined rule for replacing glyph "%s" by "%s"'
+ % (from_glyph, lookup.mapping[from_glyph]),
+ location,
+ )
+ lookup.mapping[from_glyph] = to_glyph
+ # GSUB 2
def add_multiple_subst(
self, location, prefix, glyph, suffix, replacements, forceChain=False
):
@@ -1250,39 +1296,61 @@ class Builder(object):
)
lookup.mapping[glyph] = replacements
- def add_reverse_chain_single_subst(self, location, old_prefix, old_suffix, mapping):
- lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder)
- lookup.rules.append((old_prefix, old_suffix, mapping))
-
- def add_single_subst(self, location, prefix, suffix, mapping, forceChain):
+ # GSUB 3
+ def add_alternate_subst(self, location, prefix, glyph, suffix, replacement):
if self.cur_feature_name_ == "aalt":
- for (from_glyph, to_glyph) in mapping.items():
- alts = self.aalt_alternates_.setdefault(from_glyph, set())
- alts.add(to_glyph)
+ alts = self.aalt_alternates_.setdefault(glyph, set())
+ alts.update(replacement)
return
+ if prefix or suffix:
+ chain = self.get_lookup_(location, ChainContextSubstBuilder)
+ lookup = self.get_chained_lookup_(location, AlternateSubstBuilder)
+ chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [lookup]))
+ else:
+ lookup = self.get_lookup_(location, AlternateSubstBuilder)
+ if glyph in lookup.alternates:
+ raise FeatureLibError(
+ 'Already defined alternates for glyph "%s"' % glyph, location
+ )
+ # We allow empty replacement glyphs here.
+ lookup.alternates[glyph] = replacement
+
+ # GSUB 4
+ def add_ligature_subst(
+ self, location, prefix, glyphs, suffix, replacement, forceChain
+ ):
if prefix or suffix or forceChain:
- self.add_single_subst_chained_(location, prefix, suffix, mapping)
- return
- lookup = self.get_lookup_(location, SingleSubstBuilder)
- for (from_glyph, to_glyph) in mapping.items():
- if from_glyph in lookup.mapping:
- if to_glyph == lookup.mapping[from_glyph]:
- log.info(
- "Removing duplicate single substitution from glyph"
- ' "%s" to "%s" at %s',
- from_glyph,
- to_glyph,
- location,
- )
- else:
- raise FeatureLibError(
- 'Already defined rule for replacing glyph "%s" by "%s"'
- % (from_glyph, lookup.mapping[from_glyph]),
- location,
- )
- lookup.mapping[from_glyph] = to_glyph
+ chain = self.get_lookup_(location, ChainContextSubstBuilder)
+ lookup = self.get_chained_lookup_(location, LigatureSubstBuilder)
+ chain.rules.append(ChainContextualRule(prefix, glyphs, suffix, [lookup]))
+ else:
+ lookup = self.get_lookup_(location, LigatureSubstBuilder)
+
+ if not all(glyphs):
+ raise FeatureLibError("Empty glyph class in substitution", location)
+
+ # OpenType feature file syntax, section 5.d, "Ligature substitution":
+ # "Since the OpenType specification does not allow ligature
+ # substitutions to be specified on target sequences that contain
+ # glyph classes, the implementation software will enumerate
+ # all specific glyph sequences if glyph classes are detected"
+ for g in sorted(itertools.product(*glyphs)):
+ lookup.ligatures[g] = replacement
+
+ # GSUB 5/6
+ def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
+ if not all(glyphs) or not all(prefix) or not all(suffix):
+ raise FeatureLibError("Empty glyph class in contextual substitution", location)
+ lookup = self.get_lookup_(location, ChainContextSubstBuilder)
+ lookup.rules.append(
+ ChainContextualRule(
+ prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
+ )
+ )
def add_single_subst_chained_(self, location, prefix, suffix, mapping):
+ if not mapping or not all(prefix) or not all(suffix):
+ raise FeatureLibError("Empty glyph class in contextual substitution", location)
# https://github.com/fonttools/fonttools/issues/512
chain = self.get_lookup_(location, ChainContextSubstBuilder)
sub = chain.find_chainable_single_subst(set(mapping.keys()))
@@ -1293,91 +1361,115 @@ class Builder(object):
ChainContextualRule(prefix, [list(mapping.keys())], suffix, [sub])
)
+ # GSUB 8
+ def add_reverse_chain_single_subst(self, location, old_prefix, old_suffix, mapping):
+ if not mapping:
+ raise FeatureLibError("Empty glyph class in substitution", location)
+ lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder)
+ lookup.rules.append((old_prefix, old_suffix, mapping))
+
+ # GPOS rules
+
+ # GPOS 1
+ def add_single_pos(self, location, prefix, suffix, pos, forceChain):
+ if prefix or suffix or forceChain:
+ self.add_single_pos_chained_(location, prefix, suffix, pos)
+ else:
+ lookup = self.get_lookup_(location, SinglePosBuilder)
+ for glyphs, value in pos:
+ if not glyphs:
+ raise FeatureLibError("Empty glyph class in positioning rule", location)
+ otValueRecord = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
+ for glyph in glyphs:
+ try:
+ lookup.add_pos(location, glyph, otValueRecord)
+ except OpenTypeLibError as e:
+ raise FeatureLibError(str(e), e.location) from e
+
+ # GPOS 2
+ def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
+ if not glyphclass1 or not glyphclass2:
+ raise FeatureLibError(
+ "Empty glyph class in positioning rule", location
+ )
+ lookup = self.get_lookup_(location, PairPosBuilder)
+ v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
+ v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
+ lookup.addClassPair(location, glyphclass1, v1, glyphclass2, v2)
+
+ def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2):
+ if not glyph1 or not glyph2:
+ raise FeatureLibError("Empty glyph class in positioning rule", location)
+ lookup = self.get_lookup_(location, PairPosBuilder)
+ v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
+ v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
+ lookup.addGlyphPair(location, glyph1, v1, glyph2, v2)
+
+ # GPOS 3
def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor):
+ if not glyphclass:
+ raise FeatureLibError("Empty glyph class in positioning rule", location)
lookup = self.get_lookup_(location, CursivePosBuilder)
lookup.add_attachment(
location,
glyphclass,
- makeOpenTypeAnchor(entryAnchor),
- makeOpenTypeAnchor(exitAnchor),
+ self.makeOpenTypeAnchor(location, entryAnchor),
+ self.makeOpenTypeAnchor(location, exitAnchor),
)
- def add_marks_(self, location, lookupBuilder, marks):
- """Helper for add_mark_{base,liga,mark}_pos."""
- for _, markClass in marks:
- for markClassDef in markClass.definitions:
- for mark in markClassDef.glyphs.glyphSet():
- if mark not in lookupBuilder.marks:
- otMarkAnchor = makeOpenTypeAnchor(markClassDef.anchor)
- lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
- else:
- existingMarkClass = lookupBuilder.marks[mark][0]
- if markClass.name != existingMarkClass:
- raise FeatureLibError(
- "Glyph %s cannot be in both @%s and @%s"
- % (mark, existingMarkClass, markClass.name),
- location,
- )
-
+ # GPOS 4
def add_mark_base_pos(self, location, bases, marks):
builder = self.get_lookup_(location, MarkBasePosBuilder)
self.add_marks_(location, builder, marks)
+ if not bases:
+ raise FeatureLibError("Empty glyph class in positioning rule", location)
for baseAnchor, markClass in marks:
- otBaseAnchor = makeOpenTypeAnchor(baseAnchor)
+ otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor)
for base in bases:
builder.bases.setdefault(base, {})[markClass.name] = otBaseAnchor
+ # GPOS 5
def add_mark_lig_pos(self, location, ligatures, components):
builder = self.get_lookup_(location, MarkLigPosBuilder)
componentAnchors = []
+ if not ligatures:
+ raise FeatureLibError("Empty glyph class in positioning rule", location)
for marks in components:
anchors = {}
self.add_marks_(location, builder, marks)
for ligAnchor, markClass in marks:
- anchors[markClass.name] = makeOpenTypeAnchor(ligAnchor)
+ anchors[markClass.name] = self.makeOpenTypeAnchor(location, ligAnchor)
componentAnchors.append(anchors)
for glyph in ligatures:
builder.ligatures[glyph] = componentAnchors
+ # GPOS 6
def add_mark_mark_pos(self, location, baseMarks, marks):
builder = self.get_lookup_(location, MarkMarkPosBuilder)
self.add_marks_(location, builder, marks)
+ if not baseMarks:
+ raise FeatureLibError("Empty glyph class in positioning rule", location)
for baseAnchor, markClass in marks:
- otBaseAnchor = makeOpenTypeAnchor(baseAnchor)
+ otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor)
for baseMark in baseMarks:
builder.baseMarks.setdefault(baseMark, {})[
markClass.name
] = otBaseAnchor
- def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
- lookup = self.get_lookup_(location, PairPosBuilder)
- v1 = makeOpenTypeValueRecord(value1, pairPosContext=True)
- v2 = makeOpenTypeValueRecord(value2, pairPosContext=True)
- lookup.addClassPair(location, glyphclass1, v1, glyphclass2, v2)
-
- def add_subtable_break(self, location):
- self.cur_lookup_.add_subtable_break(location)
-
- def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2):
- lookup = self.get_lookup_(location, PairPosBuilder)
- v1 = makeOpenTypeValueRecord(value1, pairPosContext=True)
- v2 = makeOpenTypeValueRecord(value2, pairPosContext=True)
- lookup.addGlyphPair(location, glyph1, v1, glyph2, v2)
-
- def add_single_pos(self, location, prefix, suffix, pos, forceChain):
- if prefix or suffix or forceChain:
- self.add_single_pos_chained_(location, prefix, suffix, pos)
- else:
- lookup = self.get_lookup_(location, SinglePosBuilder)
- for glyphs, value in pos:
- otValueRecord = makeOpenTypeValueRecord(value, pairPosContext=False)
- for glyph in glyphs:
- try:
- lookup.add_pos(location, glyph, otValueRecord)
- except OpenTypeLibError as e:
- raise FeatureLibError(str(e), e.location) from e
+ # GPOS 7/8
+ def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
+ if not all(glyphs) or not all(prefix) or not all(suffix):
+ raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
+ lookup = self.get_lookup_(location, ChainContextPosBuilder)
+ lookup.rules.append(
+ ChainContextualRule(
+ prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
+ )
+ )
def add_single_pos_chained_(self, location, prefix, suffix, pos):
+ if not pos or not all(prefix) or not all(suffix):
+ raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
# https://github.com/fonttools/fonttools/issues/514
chain = self.get_lookup_(location, ChainContextPosBuilder)
targets = []
@@ -1388,7 +1480,7 @@ class Builder(object):
if value is None:
subs.append(None)
continue
- otValue = makeOpenTypeValueRecord(value, pairPosContext=False)
+ otValue = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
if sub is None:
sub = self.get_chained_lookup_(location, SinglePosBuilder)
@@ -1401,6 +1493,26 @@ class Builder(object):
ChainContextualRule(prefix, [g for g, v in pos], suffix, subs)
)
+ def add_marks_(self, location, lookupBuilder, marks):
+ """Helper for add_mark_{base,liga,mark}_pos."""
+ for _, markClass in marks:
+ for markClassDef in markClass.definitions:
+ for mark in markClassDef.glyphs.glyphSet():
+ if mark not in lookupBuilder.marks:
+ otMarkAnchor = self.makeOpenTypeAnchor(location, markClassDef.anchor)
+ lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
+ else:
+ existingMarkClass = lookupBuilder.marks[mark][0]
+ if markClass.name != existingMarkClass:
+ raise FeatureLibError(
+ "Glyph %s cannot be in both @%s and @%s"
+ % (mark, existingMarkClass, markClass.name),
+ location,
+ )
+
+ def add_subtable_break(self, location):
+ self.cur_lookup_.add_subtable_break(location)
+
def setGlyphClass_(self, location, glyph, glyphClass):
oldClass, oldLocation = self.glyphClassDefs_.get(glyph, (None, None))
if oldClass and oldClass != glyphClass:
@@ -1445,37 +1557,98 @@ class Builder(object):
def add_vhea_field(self, key, value):
self.vhea_[key] = value
+ def add_conditionset(self, key, value):
+ if "fvar" not in self.font:
+ raise FeatureLibError(
+ "Cannot add feature variations to a font without an 'fvar' table",
+ None,
+ )
+
+ # Normalize
+ axisMap = {
+ axis.axisTag: (axis.minValue, axis.defaultValue, axis.maxValue)
+ for axis in self.axes
+ }
-def makeOpenTypeAnchor(anchor):
- """ast.Anchor --> otTables.Anchor"""
- if anchor is None:
- return None
- deviceX, deviceY = None, None
- if anchor.xDeviceTable is not None:
- deviceX = otl.buildDevice(dict(anchor.xDeviceTable))
- if anchor.yDeviceTable is not None:
- deviceY = otl.buildDevice(dict(anchor.yDeviceTable))
- return otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY)
+ value = {
+ tag: (
+ normalizeValue(bottom, axisMap[tag]),
+ normalizeValue(top, axisMap[tag]),
+ )
+ for tag, (bottom, top) in value.items()
+ }
+
+ self.conditionsets_[key] = value
+
+ def makeOpenTypeAnchor(self, location, anchor):
+ """ast.Anchor --> otTables.Anchor"""
+ if anchor is None:
+ return None
+ variable = False
+ deviceX, deviceY = None, None
+ if anchor.xDeviceTable is not None:
+ deviceX = otl.buildDevice(dict(anchor.xDeviceTable))
+ if anchor.yDeviceTable is not None:
+ deviceY = otl.buildDevice(dict(anchor.yDeviceTable))
+ for dim in ("x", "y"):
+ if not isinstance(getattr(anchor, dim), VariableScalar):
+ continue
+ if getattr(anchor, dim+"DeviceTable") is not None:
+ raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
+ if not self.varstorebuilder:
+ raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
+ varscalar = getattr(anchor,dim)
+ varscalar.axes = self.axes
+ default, index = varscalar.add_to_variation_store(self.varstorebuilder)
+ setattr(anchor, dim, default)
+ if index is not None and index != 0xFFFFFFFF:
+ if dim == "x":
+ deviceX = buildVarDevTable(index)
+ else:
+ deviceY = buildVarDevTable(index)
+ variable = True
+ otlanchor = otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY)
+ if variable:
+ otlanchor.Format = 3
+ return otlanchor
-_VALUEREC_ATTRS = {
- name[0].lower() + name[1:]: (name, isDevice)
- for _, name, isDevice, _ in otBase.valueRecordFormat
- if not name.startswith("Reserved")
-}
+ _VALUEREC_ATTRS = {
+ name[0].lower() + name[1:]: (name, isDevice)
+ for _, name, isDevice, _ in otBase.valueRecordFormat
+ if not name.startswith("Reserved")
+ }
-def makeOpenTypeValueRecord(v, pairPosContext):
- """ast.ValueRecord --> otBase.ValueRecord"""
- if not v:
- return None
+ def makeOpenTypeValueRecord(self, location, v, pairPosContext):
+ """ast.ValueRecord --> otBase.ValueRecord"""
+ if not v:
+ return None
+
+ vr = {}
+ variable = False
+ for astName, (otName, isDevice) in self._VALUEREC_ATTRS.items():
+ val = getattr(v, astName, None)
+ if not val:
+ continue
+ if isDevice:
+ vr[otName] = otl.buildDevice(dict(val))
+ elif isinstance(val, VariableScalar):
+ otDeviceName = otName[0:4] + "Device"
+ feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:]
+ if getattr(v, feaDeviceName):
+ raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
+ if not self.varstorebuilder:
+ raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
+ val.axes = self.axes
+ default, index = val.add_to_variation_store(self.varstorebuilder)
+ vr[otName] = default
+ if index is not None and index != 0xFFFFFFFF:
+ vr[otDeviceName] = buildVarDevTable(index)
+ variable = True
+ else:
+ vr[otName] = val
- vr = {}
- for astName, (otName, isDevice) in _VALUEREC_ATTRS.items():
- val = getattr(v, astName, None)
- if val:
- vr[otName] = otl.buildDevice(dict(val)) if isDevice else val
- if pairPosContext and not vr:
- vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0}
- valRec = otl.buildValue(vr)
- return valRec
+ if pairPosContext and not vr:
+ vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0}
+ valRec = otl.buildValue(vr)
+ return valRec
diff --git a/Lib/fontTools/feaLib/parser.py b/Lib/fontTools/feaLib/parser.py
index 804cba9f..fd53573d 100644
--- a/Lib/fontTools/feaLib/parser.py
+++ b/Lib/fontTools/feaLib/parser.py
@@ -1,7 +1,8 @@
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer
+from fontTools.feaLib.variableScalar import VariableScalar
from fontTools.misc.encodingTools import getEncoding
-from fontTools.misc.py23 import bytechr, tobytes, tostr
+from fontTools.misc.textTools import bytechr, tobytes, tostr
import fontTools.feaLib.ast as ast
import logging
import os
@@ -101,6 +102,10 @@ class Parser(object):
statements.append(self.parse_markClass_())
elif self.is_cur_keyword_("feature"):
statements.append(self.parse_feature_block_())
+ elif self.is_cur_keyword_("conditionset"):
+ statements.append(self.parse_conditionset_())
+ elif self.is_cur_keyword_("variation"):
+ statements.append(self.parse_feature_block_(variation=True))
elif self.is_cur_keyword_("table"):
statements.append(self.parse_table_())
elif self.is_cur_keyword_("valueRecordDef"):
@@ -152,7 +157,7 @@ class Parser(object):
location=location,
)
- x, y = self.expect_number_(), self.expect_number_()
+ x, y = self.expect_number_(variable=True), self.expect_number_(variable=True)
contourpoint = None
if self.next_token_ == "contourpoint": # Format B
@@ -380,8 +385,7 @@ class Parser(object):
self.expect_symbol_("-")
range_end = self.expect_cid_()
self.check_glyph_name_in_glyph_set(
- f"cid{range_start:05d}",
- f"cid{range_end:05d}",
+ f"cid{range_start:05d}", f"cid{range_end:05d}",
)
glyphs.add_cid_range(
range_start,
@@ -473,14 +477,38 @@ class Parser(object):
assert lookups == []
return ([], prefix, [None] * len(prefix), values, [], hasMarks)
else:
- assert not any(values[: len(prefix)]), values
- format1 = values[len(prefix) :][: len(glyphs)]
- format2 = values[(len(prefix) + len(glyphs)) :][: len(suffix)]
- values = (
- format2
- if format2 and isinstance(format2[0], self.ast.ValueRecord)
- else format1
- )
+ if any(values[: len(prefix)]):
+ raise FeatureLibError(
+ "Positioning cannot be applied in the backtrack glyph sequence, "
+ "before the marked glyph sequence.",
+ self.cur_token_location_,
+ )
+ marked_values = values[len(prefix) : len(prefix) + len(glyphs)]
+ if any(marked_values):
+ if any(values[len(prefix) + len(glyphs) :]):
+ raise FeatureLibError(
+ "Positioning values are allowed only in the marked glyph "
+ "sequence, or after the final glyph node when only one glyph "
+ "node is marked.",
+ self.cur_token_location_,
+ )
+ values = marked_values
+ elif values and values[-1]:
+ if len(glyphs) > 1 or any(values[:-1]):
+ raise FeatureLibError(
+ "Positioning values are allowed only in the marked glyph "
+ "sequence, or after the final glyph node when only one glyph "
+ "node is marked.",
+ self.cur_token_location_,
+ )
+ values = values[-1:]
+ elif any(values):
+ raise FeatureLibError(
+ "Positioning values are allowed only in the marked glyph "
+ "sequence, or after the final glyph node when only one glyph "
+ "node is marked.",
+ self.cur_token_location_,
+ )
return (prefix, glyphs, lookups, values, suffix, hasMarks)
def parse_chain_context_(self):
@@ -656,6 +684,8 @@ class Parser(object):
assert self.is_cur_keyword_("markClass")
location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True)
+ if not glyphs.glyphSet():
+ raise FeatureLibError("Empty glyph class in mark class definition", location)
anchor = self.parse_anchor_()
name = self.expect_class_name_()
self.expect_symbol_(";")
@@ -844,7 +874,7 @@ class Parser(object):
num_lookups = len([l for l in lookups if l is not None])
is_deletion = False
- if len(new) == 1 and len(new[0].glyphSet()) == 0:
+ if len(new) == 1 and isinstance(new[0], ast.NullGlyph):
new = [] # Deletion
is_deletion = True
@@ -868,18 +898,31 @@ class Parser(object):
old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location
)
+ # Glyph deletion, built as GSUB lookup type 2: Multiple substitution
+ # with empty replacement.
+ if is_deletion and len(old) == 1 and num_lookups == 0:
+ return self.ast.MultipleSubstStatement(
+ old_prefix,
+ old[0],
+ old_suffix,
+ (),
+ forceChain=hasMarks,
+ location=location,
+ )
+
# GSUB lookup type 2: Multiple substitution.
# Format: "substitute f_f_i by f f i;"
if (
not reverse
and len(old) == 1
and len(old[0].glyphSet()) == 1
- and (
- (len(new) > 1 and max([len(n.glyphSet()) for n in new]) == 1)
- or len(new) == 0
- )
+ and len(new) > 1
+ and max([len(n.glyphSet()) for n in new]) == 1
and num_lookups == 0
):
+ for n in new:
+ if not list(n.glyphSet()):
+ raise FeatureLibError("Empty class in replacement", location)
return self.ast.MultipleSubstStatement(
old_prefix,
tuple(old[0].glyphSet())[0],
@@ -971,8 +1014,8 @@ class Parser(object):
location = self.cur_token_location_
DesignSize = self.expect_decipoint_()
SubfamilyID = self.expect_number_()
- RangeStart = 0.
- RangeEnd = 0.
+ RangeStart = 0.0
+ RangeEnd = 0.0
if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or SubfamilyID != 0:
RangeStart = self.expect_decipoint_()
RangeEnd = self.expect_decipoint_()
@@ -1551,11 +1594,20 @@ class Parser(object):
return result
def is_next_value_(self):
- return self.next_token_type_ is Lexer.NUMBER or self.next_token_ == "<"
+ return (
+ self.next_token_type_ is Lexer.NUMBER
+ or self.next_token_ == "<"
+ or self.next_token_ == "("
+ )
def parse_valuerecord_(self, vertical):
- if self.next_token_type_ is Lexer.NUMBER:
- number, location = self.expect_number_(), self.cur_token_location_
+ if (
+ self.next_token_type_ is Lexer.SYMBOL and self.next_token_ == "("
+ ) or self.next_token_type_ is Lexer.NUMBER:
+ number, location = (
+ self.expect_number_(variable=True),
+ self.cur_token_location_,
+ )
if vertical:
val = self.ast.ValueRecord(
yAdvance=number, vertical=vertical, location=location
@@ -1582,10 +1634,10 @@ class Parser(object):
xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
else:
xPlacement, yPlacement, xAdvance, yAdvance = (
- self.expect_number_(),
- self.expect_number_(),
- self.expect_number_(),
- self.expect_number_(),
+ self.expect_number_(variable=True),
+ self.expect_number_(variable=True),
+ self.expect_number_(variable=True),
+ self.expect_number_(variable=True),
)
if self.next_token_ == "<":
@@ -1645,8 +1697,11 @@ class Parser(object):
self.expect_symbol_(";")
return self.ast.LanguageSystemStatement(script, language, location=location)
- def parse_feature_block_(self):
- assert self.cur_token_ == "feature"
+ def parse_feature_block_(self, variation=False):
+ if variation:
+ assert self.cur_token_ == "variation"
+ else:
+ assert self.cur_token_ == "feature"
location = self.cur_token_location_
tag = self.expect_tag_()
vertical = tag in {"vkrn", "vpal", "vhal", "valt"}
@@ -1661,14 +1716,22 @@ class Parser(object):
elif tag == "size":
size_feature = True
+ if variation:
+ conditionset = self.expect_name_()
+
use_extension = False
if self.next_token_ == "useExtension":
self.expect_keyword_("useExtension")
use_extension = True
- block = self.ast.FeatureBlock(
- tag, use_extension=use_extension, location=location
- )
+ if variation:
+ block = self.ast.VariationBlock(
+ tag, conditionset, use_extension=use_extension, location=location
+ )
+ else:
+ block = self.ast.FeatureBlock(
+ tag, use_extension=use_extension, location=location
+ )
self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature)
return block
@@ -1816,6 +1879,43 @@ class Parser(object):
raise FeatureLibError("Font revision numbers must be positive", location)
return self.ast.FontRevisionStatement(version, location=location)
+ def parse_conditionset_(self):
+ name = self.expect_name_()
+
+ conditions = {}
+ self.expect_symbol_("{")
+
+ while self.next_token_ != "}":
+ self.advance_lexer_()
+ if self.cur_token_type_ is not Lexer.NAME:
+ raise FeatureLibError("Expected an axis name", self.cur_token_location_)
+
+ axis = self.cur_token_
+ if axis in conditions:
+ raise FeatureLibError(
+ f"Repeated condition for axis {axis}", self.cur_token_location_
+ )
+
+ if self.next_token_type_ is Lexer.FLOAT:
+ min_value = self.expect_float_()
+ elif self.next_token_type_ is Lexer.NUMBER:
+ min_value = self.expect_number_(variable=False)
+
+ if self.next_token_type_ is Lexer.FLOAT:
+ max_value = self.expect_float_()
+ elif self.next_token_type_ is Lexer.NUMBER:
+ max_value = self.expect_number_(variable=False)
+ self.expect_symbol_(";")
+
+ conditions[axis] = (min_value, max_value)
+
+ self.expect_symbol_("}")
+
+ finalname = self.expect_name_()
+ if finalname != name:
+ raise FeatureLibError('Expected "%s"' % name, self.cur_token_location_)
+ return self.ast.ConditionsetStatement(name, conditions)
+
def parse_block_(
self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None
):
@@ -2046,12 +2146,51 @@ class Parser(object):
return self.cur_token_
raise FeatureLibError("Expected a name", self.cur_token_location_)
- def expect_number_(self):
+ def expect_number_(self, variable=False):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NUMBER:
return self.cur_token_
+ if variable and self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "(":
+ return self.expect_variable_scalar_()
raise FeatureLibError("Expected a number", self.cur_token_location_)
+ def expect_variable_scalar_(self):
+ self.advance_lexer_() # "("
+ scalar = VariableScalar()
+ while True:
+ if self.cur_token_type_ == Lexer.SYMBOL and self.cur_token_ == ")":
+ break
+ location, value = self.expect_master_()
+ scalar.add_value(location, value)
+ return scalar
+
+ def expect_master_(self):
+ location = {}
+ while True:
+ if self.cur_token_type_ is not Lexer.NAME:
+ raise FeatureLibError("Expected an axis name", self.cur_token_location_)
+ axis = self.cur_token_
+ self.advance_lexer_()
+ if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "="):
+ raise FeatureLibError(
+ "Expected an equals sign", self.cur_token_location_
+ )
+ value = self.expect_number_()
+ location[axis] = value
+ if self.next_token_type_ is Lexer.NAME and self.next_token_[0] == ":":
+ # Lexer has just read the value as a glyph name. We'll correct it later
+ break
+ self.advance_lexer_()
+ if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","):
+ raise FeatureLibError(
+ "Expected a comma or an equals sign", self.cur_token_location_
+ )
+ self.advance_lexer_()
+ self.advance_lexer_()
+ value = int(self.cur_token_[1:])
+ self.advance_lexer_()
+ return location, value
+
def expect_any_number_(self):
self.advance_lexer_()
if self.cur_token_type_ in Lexer.NUMBERS:
diff --git a/Lib/fontTools/feaLib/variableScalar.py b/Lib/fontTools/feaLib/variableScalar.py
new file mode 100644
index 00000000..a286568e
--- /dev/null
+++ b/Lib/fontTools/feaLib/variableScalar.py
@@ -0,0 +1,97 @@
+from fontTools.varLib.models import VariationModel, normalizeValue
+
+
+def Location(loc):
+ return tuple(sorted(loc.items()))
+
+
+class VariableScalar:
+ """A scalar with different values at different points in the designspace."""
+
+ def __init__(self, location_value={}):
+ self.values = {}
+ self.axes = {}
+ for location, value in location_value.items():
+ self.add_value(location, value)
+
+ def __repr__(self):
+ items = []
+ for location, value in self.values.items():
+ loc = ",".join(["%s=%i" % (ax, loc) for ax, loc in location])
+ items.append("%s:%i" % (loc, value))
+ return "(" + (" ".join(items)) + ")"
+
+ @property
+ def does_vary(self):
+ values = list(self.values.values())
+ return any(v != values[0] for v in values[1:])
+
+ @property
+ def axes_dict(self):
+ if not self.axes:
+ raise ValueError(
+ ".axes must be defined on variable scalar before interpolating"
+ )
+ return {ax.axisTag: ax for ax in self.axes}
+
+ def _normalized_location(self, location):
+ location = self.fix_location(location)
+ normalized_location = {}
+ for axtag in location.keys():
+ if axtag not in self.axes_dict:
+ raise ValueError("Unknown axis %s in %s" % (axtag, location))
+ axis = self.axes_dict[axtag]
+ normalized_location[axtag] = normalizeValue(
+ location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue)
+ )
+
+ return Location(normalized_location)
+
+ def fix_location(self, location):
+ location = dict(location)
+ for tag, axis in self.axes_dict.items():
+ if tag not in location:
+ location[tag] = axis.defaultValue
+ return location
+
+ def add_value(self, location, value):
+ if self.axes:
+ location = self.fix_location(location)
+
+ self.values[Location(location)] = value
+
+ def fix_all_locations(self):
+ self.values = {
+ Location(self.fix_location(l)): v for l, v in self.values.items()
+ }
+
+ @property
+ def default(self):
+ self.fix_all_locations()
+ key = Location({ax.axisTag: ax.defaultValue for ax in self.axes})
+ if key not in self.values:
+ raise ValueError("Default value could not be found")
+ # I *guess* we could interpolate one, but I don't know how.
+ return self.values[key]
+
+ def value_at_location(self, location):
+ loc = location
+ if loc in self.values.keys():
+ return self.values[loc]
+ values = list(self.values.values())
+ return self.model.interpolateFromMasters(loc, values)
+
+ @property
+ def model(self):
+ locations = [dict(self._normalized_location(k)) for k in self.values.keys()]
+ return VariationModel(locations)
+
+ def get_deltas_and_supports(self):
+ values = list(self.values.values())
+ return self.model.getDeltasAndSupports(values)
+
+ def add_to_variation_store(self, store_builder):
+ deltas, supports = self.get_deltas_and_supports()
+ store_builder.setSupports(supports)
+ index = store_builder.storeDeltas(deltas)
+ return int(self.default), index
diff --git a/Lib/fontTools/fontBuilder.py b/Lib/fontTools/fontBuilder.py
index e2824084..bf3b31b7 100644
--- a/Lib/fontTools/fontBuilder.py
+++ b/Lib/fontTools/fontBuilder.py
@@ -834,7 +834,14 @@ class FontBuilder(object):
self.font, conditionalSubstitutions, featureTag=featureTag
)
- def setupCOLR(self, colorLayers, version=None, varStore=None):
+ def setupCOLR(
+ self,
+ colorLayers,
+ version=None,
+ varStore=None,
+ varIndexMap=None,
+ clipBoxes=None,
+ ):
"""Build new COLR table using color layers dictionary.
Cf. `fontTools.colorLib.builder.buildCOLR`.
@@ -843,7 +850,12 @@ class FontBuilder(object):
glyphMap = self.font.getReverseGlyphMap()
self.font["COLR"] = buildCOLR(
- colorLayers, version=version, glyphMap=glyphMap, varStore=varStore
+ colorLayers,
+ version=version,
+ glyphMap=glyphMap,
+ varStore=varStore,
+ varIndexMap=varIndexMap,
+ clipBoxes=clipBoxes,
)
def setupCPAL(
diff --git a/Lib/fontTools/help.py b/Lib/fontTools/help.py
index ff8048d5..4334e500 100644
--- a/Lib/fontTools/help.py
+++ b/Lib/fontTools/help.py
@@ -22,7 +22,8 @@ def main():
description = imports.main.__doc__
if description:
pkg = pkg.replace("fontTools.", "").replace(".__main__", "")
- descriptions[pkg] = description
+ # show the docstring's first line only
+ descriptions[pkg] = description.splitlines()[0]
except AttributeError as e:
pass
for pkg, description in descriptions.items():
diff --git a/Lib/fontTools/merge.py b/Lib/fontTools/merge.py
deleted file mode 100644
index 2df22a8d..00000000
--- a/Lib/fontTools/merge.py
+++ /dev/null
@@ -1,1205 +0,0 @@
-# Copyright 2013 Google, Inc. All Rights Reserved.
-#
-# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
-
-from fontTools.misc.timeTools import timestampNow
-from fontTools import ttLib, cffLib
-from fontTools.ttLib.tables import otTables, _h_e_a_d
-from fontTools.ttLib.tables.DefaultTable import DefaultTable
-from fontTools.misc.loggingTools import Timer
-from fontTools.pens.recordingPen import DecomposingRecordingPen
-from functools import reduce
-import sys
-import time
-import operator
-import logging
-
-
-log = logging.getLogger("fontTools.merge")
-timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO)
-
-
-def _add_method(*clazzes, **kwargs):
- """Returns a decorator function that adds a new method to one or
- more classes."""
- allowDefault = kwargs.get('allowDefaultTable', False)
- def wrapper(method):
- done = []
- for clazz in clazzes:
- if clazz in done: continue # Support multiple names of a clazz
- done.append(clazz)
- assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
- assert method.__name__ not in clazz.__dict__, \
- "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
- setattr(clazz, method.__name__, method)
- return None
- return wrapper
-
-# General utility functions for merging values from different fonts
-
-def equal(lst):
- lst = list(lst)
- t = iter(lst)
- first = next(t)
- assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
- return first
-
-def first(lst):
- return next(iter(lst))
-
-def recalculate(lst):
- return NotImplemented
-
-def current_time(lst):
- return timestampNow()
-
-def bitwise_and(lst):
- return reduce(operator.and_, lst)
-
-def bitwise_or(lst):
- return reduce(operator.or_, lst)
-
-def avg_int(lst):
- lst = list(lst)
- return sum(lst) // len(lst)
-
-def onlyExisting(func):
- """Returns a filter func that when called with a list,
- only calls func on the non-NotImplemented items of the list,
- and only so if there's at least one item remaining.
- Otherwise returns NotImplemented."""
-
- def wrapper(lst):
- items = [item for item in lst if item is not NotImplemented]
- return func(items) if items else NotImplemented
-
- return wrapper
-
-def sumLists(lst):
- l = []
- for item in lst:
- l.extend(item)
- return l
-
-def sumDicts(lst):
- d = {}
- for item in lst:
- d.update(item)
- return d
-
-def mergeObjects(lst):
- lst = [item for item in lst if item is not NotImplemented]
- if not lst:
- return NotImplemented
- lst = [item for item in lst if item is not None]
- if not lst:
- return None
-
- clazz = lst[0].__class__
- assert all(type(item) == clazz for item in lst), lst
-
- logic = clazz.mergeMap
- returnTable = clazz()
- returnDict = {}
-
- allKeys = set.union(set(), *(vars(table).keys() for table in lst))
- for key in allKeys:
- try:
- mergeLogic = logic[key]
- except KeyError:
- try:
- mergeLogic = logic['*']
- except KeyError:
- raise Exception("Don't know how to merge key %s of class %s" %
- (key, clazz.__name__))
- if mergeLogic is NotImplemented:
- continue
- value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
- if value is not NotImplemented:
- returnDict[key] = value
-
- returnTable.__dict__ = returnDict
-
- return returnTable
-
-def mergeBits(bitmap):
-
- def wrapper(lst):
- lst = list(lst)
- returnValue = 0
- for bitNumber in range(bitmap['size']):
- try:
- mergeLogic = bitmap[bitNumber]
- except KeyError:
- try:
- mergeLogic = bitmap['*']
- except KeyError:
- raise Exception("Don't know how to merge bit %s" % bitNumber)
- shiftedBit = 1 << bitNumber
- mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
- returnValue |= mergedValue << bitNumber
- return returnValue
-
- return wrapper
-
-
-@_add_method(DefaultTable, allowDefaultTable=True)
-def merge(self, m, tables):
- if not hasattr(self, 'mergeMap'):
- log.info("Don't know how to merge '%s'.", self.tableTag)
- return NotImplemented
-
- logic = self.mergeMap
-
- if isinstance(logic, dict):
- return m.mergeObjects(self, self.mergeMap, tables)
- else:
- return logic(tables)
-
-
-ttLib.getTableClass('maxp').mergeMap = {
- '*': max,
- 'tableTag': equal,
- 'tableVersion': equal,
- 'numGlyphs': sum,
- 'maxStorage': first,
- 'maxFunctionDefs': first,
- 'maxInstructionDefs': first,
- # TODO When we correctly merge hinting data, update these values:
- # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
-}
-
-headFlagsMergeBitMap = {
- 'size': 16,
- '*': bitwise_or,
- 1: bitwise_and, # Baseline at y = 0
- 2: bitwise_and, # lsb at x = 0
- 3: bitwise_and, # Force ppem to integer values. FIXME?
- 5: bitwise_and, # Font is vertical
- 6: lambda bit: 0, # Always set to zero
- 11: bitwise_and, # Font data is 'lossless'
- 13: bitwise_and, # Optimized for ClearType
- 14: bitwise_and, # Last resort font. FIXME? equal or first may be better
- 15: lambda bit: 0, # Always set to zero
-}
-
-ttLib.getTableClass('head').mergeMap = {
- 'tableTag': equal,
- 'tableVersion': max,
- 'fontRevision': max,
- 'checkSumAdjustment': lambda lst: 0, # We need *something* here
- 'magicNumber': equal,
- 'flags': mergeBits(headFlagsMergeBitMap),
- 'unitsPerEm': equal,
- 'created': current_time,
- 'modified': current_time,
- 'xMin': min,
- 'yMin': min,
- 'xMax': max,
- 'yMax': max,
- 'macStyle': first,
- 'lowestRecPPEM': max,
- 'fontDirectionHint': lambda lst: 2,
- 'indexToLocFormat': recalculate,
- 'glyphDataFormat': equal,
-}
-
-ttLib.getTableClass('hhea').mergeMap = {
- '*': equal,
- 'tableTag': equal,
- 'tableVersion': max,
- 'ascent': max,
- 'descent': min,
- 'lineGap': max,
- 'advanceWidthMax': max,
- 'minLeftSideBearing': min,
- 'minRightSideBearing': min,
- 'xMaxExtent': max,
- 'caretSlopeRise': first,
- 'caretSlopeRun': first,
- 'caretOffset': first,
- 'numberOfHMetrics': recalculate,
-}
-
-ttLib.getTableClass('vhea').mergeMap = {
- '*': equal,
- 'tableTag': equal,
- 'tableVersion': max,
- 'ascent': max,
- 'descent': min,
- 'lineGap': max,
- 'advanceHeightMax': max,
- 'minTopSideBearing': min,
- 'minBottomSideBearing': min,
- 'yMaxExtent': max,
- 'caretSlopeRise': first,
- 'caretSlopeRun': first,
- 'caretOffset': first,
- 'numberOfVMetrics': recalculate,
-}
-
-os2FsTypeMergeBitMap = {
- 'size': 16,
- '*': lambda bit: 0,
- 1: bitwise_or, # no embedding permitted
- 2: bitwise_and, # allow previewing and printing documents
- 3: bitwise_and, # allow editing documents
- 8: bitwise_or, # no subsetting permitted
- 9: bitwise_or, # no embedding of outlines permitted
-}
-
-def mergeOs2FsType(lst):
- lst = list(lst)
- if all(item == 0 for item in lst):
- return 0
-
- # Compute least restrictive logic for each fsType value
- for i in range(len(lst)):
- # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
- if lst[i] & 0x000C:
- lst[i] &= ~0x0002
- # set bit 2 (allow previewing) if bit 3 is set (allow editing)
- elif lst[i] & 0x0008:
- lst[i] |= 0x0004
- # set bits 2 and 3 if everything is allowed
- elif lst[i] == 0:
- lst[i] = 0x000C
-
- fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
- # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
- if fsType & 0x0002:
- fsType &= ~0x000C
- return fsType
-
-
-ttLib.getTableClass('OS/2').mergeMap = {
- '*': first,
- 'tableTag': equal,
- 'version': max,
- 'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this
- 'fsType': mergeOs2FsType, # Will be overwritten
- 'panose': first, # FIXME: should really be the first Latin font
- 'ulUnicodeRange1': bitwise_or,
- 'ulUnicodeRange2': bitwise_or,
- 'ulUnicodeRange3': bitwise_or,
- 'ulUnicodeRange4': bitwise_or,
- 'fsFirstCharIndex': min,
- 'fsLastCharIndex': max,
- 'sTypoAscender': max,
- 'sTypoDescender': min,
- 'sTypoLineGap': max,
- 'usWinAscent': max,
- 'usWinDescent': max,
- # Version 1
- 'ulCodePageRange1': onlyExisting(bitwise_or),
- 'ulCodePageRange2': onlyExisting(bitwise_or),
- # Version 2, 3, 4
- 'sxHeight': onlyExisting(max),
- 'sCapHeight': onlyExisting(max),
- 'usDefaultChar': onlyExisting(first),
- 'usBreakChar': onlyExisting(first),
- 'usMaxContext': onlyExisting(max),
- # version 5
- 'usLowerOpticalPointSize': onlyExisting(min),
- 'usUpperOpticalPointSize': onlyExisting(max),
-}
-
-@_add_method(ttLib.getTableClass('OS/2'))
-def merge(self, m, tables):
- DefaultTable.merge(self, m, tables)
- if self.version < 2:
- # bits 8 and 9 are reserved and should be set to zero
- self.fsType &= ~0x0300
- if self.version >= 3:
- # Only one of bits 1, 2, and 3 may be set. We already take
- # care of bit 1 implications in mergeOs2FsType. So unset
- # bit 2 if bit 3 is already set.
- if self.fsType & 0x0008:
- self.fsType &= ~0x0004
- return self
-
-ttLib.getTableClass('post').mergeMap = {
- '*': first,
- 'tableTag': equal,
- 'formatType': max,
- 'isFixedPitch': min,
- 'minMemType42': max,
- 'maxMemType42': lambda lst: 0,
- 'minMemType1': max,
- 'maxMemType1': lambda lst: 0,
- 'mapping': onlyExisting(sumDicts),
- 'extraNames': lambda lst: [],
-}
-
-ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
- 'tableTag': equal,
- 'metrics': sumDicts,
-}
-
-ttLib.getTableClass('name').mergeMap = {
- 'tableTag': equal,
- 'names': first, # FIXME? Does mixing name records make sense?
-}
-
-ttLib.getTableClass('loca').mergeMap = {
- '*': recalculate,
- 'tableTag': equal,
-}
-
-ttLib.getTableClass('glyf').mergeMap = {
- 'tableTag': equal,
- 'glyphs': sumDicts,
- 'glyphOrder': sumLists,
-}
-
-@_add_method(ttLib.getTableClass('glyf'))
-def merge(self, m, tables):
- for i,table in enumerate(tables):
- for g in table.glyphs.values():
- if i:
- # Drop hints for all but first font, since
- # we don't map functions / CVT values.
- g.removeHinting()
- # Expand composite glyphs to load their
- # composite glyph names.
- if g.isComposite():
- g.expand(table)
- return DefaultTable.merge(self, m, tables)
-
-ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
-ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
-ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
-ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
-
-def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2):
- pen1 = DecomposingRecordingPen(glyphSet1)
- pen2 = DecomposingRecordingPen(glyphSet2)
- g1 = glyphSet1[glyph1]
- g2 = glyphSet2[glyph2]
- g1.draw(pen1)
- g2.draw(pen2)
- return (pen1.value == pen2.value and
- g1.width == g2.width and
- (not hasattr(g1, 'height') or g1.height == g2.height))
-
-# Valid (format, platformID, platEncID) triplets for cmap subtables containing
-# Unicode BMP-only and Unicode Full Repertoire semantics.
-# Cf. OpenType spec for "Platform specific encodings":
-# https://docs.microsoft.com/en-us/typography/opentype/spec/name
-class CmapUnicodePlatEncodings:
- BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
- FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
-
-@_add_method(ttLib.getTableClass('cmap'))
-def merge(self, m, tables):
- # TODO Handle format=14.
- # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
- # If there is a format 12 table for the same font, ignore the format 4 table
- cmapTables = []
- for fontIdx,table in enumerate(tables):
- format4 = None
- format12 = None
- for subtable in table.tables:
- properties = (subtable.format, subtable.platformID, subtable.platEncID)
- if properties in CmapUnicodePlatEncodings.BMP:
- format4 = subtable
- elif properties in CmapUnicodePlatEncodings.FullRepertoire:
- format12 = subtable
- else:
- log.warning(
- "Dropped cmap subtable from font [%s]:\t"
- "format %2s, platformID %2s, platEncID %2s",
- fontIdx, subtable.format, subtable.platformID, subtable.platEncID
- )
- if format12 is not None:
- cmapTables.append((format12, fontIdx))
- elif format4 is not None:
- cmapTables.append((format4, fontIdx))
-
- # Build a unicode mapping, then decide which format is needed to store it.
- cmap = {}
- fontIndexForGlyph = {}
- glyphSets = [None for f in m.fonts] if hasattr(m, 'fonts') else None
- for table,fontIdx in cmapTables:
- # handle duplicates
- for uni,gid in table.cmap.items():
- oldgid = cmap.get(uni, None)
- if oldgid is None:
- cmap[uni] = gid
- fontIndexForGlyph[gid] = fontIdx
- elif oldgid != gid:
- # Char previously mapped to oldgid, now to gid.
- # Record, to fix up in GSUB 'locl' later.
- if m.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
- if glyphSets is not None:
- oldFontIdx = fontIndexForGlyph[oldgid]
- for idx in (fontIdx, oldFontIdx):
- if glyphSets[idx] is None:
- glyphSets[idx] = m.fonts[idx].getGlyphSet()
- if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
- continue
- m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
- elif m.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
- # Char previously mapped to oldgid but oldgid is already remapped to a different
- # gid, because of another Unicode character.
- # TODO: Try harder to do something about these.
- log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)
-
- cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF}
- self.tables = []
- module = ttLib.getTableModule('cmap')
- if len(cmapBmpOnly) != len(cmap):
- # format-12 required.
- cmapTable = module.cmap_classes[12](12)
- cmapTable.platformID = 3
- cmapTable.platEncID = 10
- cmapTable.language = 0
- cmapTable.cmap = cmap
- self.tables.append(cmapTable)
- # always create format-4
- cmapTable = module.cmap_classes[4](4)
- cmapTable.platformID = 3
- cmapTable.platEncID = 1
- cmapTable.language = 0
- cmapTable.cmap = cmapBmpOnly
- # ordered by platform then encoding
- self.tables.insert(0, cmapTable)
- self.tableVersion = 0
- self.numSubTables = len(self.tables)
- return self
-
-
-def mergeLookupLists(lst):
- # TODO Do smarter merge.
- return sumLists(lst)
-
-def mergeFeatures(lst):
- assert lst
- self = otTables.Feature()
- self.FeatureParams = None
- self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
- self.LookupCount = len(self.LookupListIndex)
- return self
-
-def mergeFeatureLists(lst):
- d = {}
- for l in lst:
- for f in l:
- tag = f.FeatureTag
- if tag not in d:
- d[tag] = []
- d[tag].append(f.Feature)
- ret = []
- for tag in sorted(d.keys()):
- rec = otTables.FeatureRecord()
- rec.FeatureTag = tag
- rec.Feature = mergeFeatures(d[tag])
- ret.append(rec)
- return ret
-
-def mergeLangSyses(lst):
- assert lst
-
- # TODO Support merging ReqFeatureIndex
- assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
-
- self = otTables.LangSys()
- self.LookupOrder = None
- self.ReqFeatureIndex = 0xFFFF
- self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
- self.FeatureCount = len(self.FeatureIndex)
- return self
-
-def mergeScripts(lst):
- assert lst
-
- if len(lst) == 1:
- return lst[0]
- langSyses = {}
- for sr in lst:
- for lsr in sr.LangSysRecord:
- if lsr.LangSysTag not in langSyses:
- langSyses[lsr.LangSysTag] = []
- langSyses[lsr.LangSysTag].append(lsr.LangSys)
- lsrecords = []
- for tag, langSys_list in sorted(langSyses.items()):
- lsr = otTables.LangSysRecord()
- lsr.LangSys = mergeLangSyses(langSys_list)
- lsr.LangSysTag = tag
- lsrecords.append(lsr)
-
- self = otTables.Script()
- self.LangSysRecord = lsrecords
- self.LangSysCount = len(lsrecords)
- dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
- if dfltLangSyses:
- self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
- else:
- self.DefaultLangSys = None
- return self
-
-def mergeScriptRecords(lst):
- d = {}
- for l in lst:
- for s in l:
- tag = s.ScriptTag
- if tag not in d:
- d[tag] = []
- d[tag].append(s.Script)
- ret = []
- for tag in sorted(d.keys()):
- rec = otTables.ScriptRecord()
- rec.ScriptTag = tag
- rec.Script = mergeScripts(d[tag])
- ret.append(rec)
- return ret
-
-otTables.ScriptList.mergeMap = {
- 'ScriptCount': lambda lst: None, # TODO
- 'ScriptRecord': mergeScriptRecords,
-}
-otTables.BaseScriptList.mergeMap = {
- 'BaseScriptCount': lambda lst: None, # TODO
- # TODO: Merge duplicate entries
- 'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
-}
-
-otTables.FeatureList.mergeMap = {
- 'FeatureCount': sum,
- 'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
-}
-
-otTables.LookupList.mergeMap = {
- 'LookupCount': sum,
- 'Lookup': sumLists,
-}
-
-otTables.Coverage.mergeMap = {
- 'Format': min,
- 'glyphs': sumLists,
-}
-
-otTables.ClassDef.mergeMap = {
- 'Format': min,
- 'classDefs': sumDicts,
-}
-
-otTables.LigCaretList.mergeMap = {
- 'Coverage': mergeObjects,
- 'LigGlyphCount': sum,
- 'LigGlyph': sumLists,
-}
-
-otTables.AttachList.mergeMap = {
- 'Coverage': mergeObjects,
- 'GlyphCount': sum,
- 'AttachPoint': sumLists,
-}
-
-# XXX Renumber MarkFilterSets of lookups
-otTables.MarkGlyphSetsDef.mergeMap = {
- 'MarkSetTableFormat': equal,
- 'MarkSetCount': sum,
- 'Coverage': sumLists,
-}
-
-otTables.Axis.mergeMap = {
- '*': mergeObjects,
-}
-
-# XXX Fix BASE table merging
-otTables.BaseTagList.mergeMap = {
- 'BaseTagCount': sum,
- 'BaselineTag': sumLists,
-}
-
-otTables.GDEF.mergeMap = \
-otTables.GSUB.mergeMap = \
-otTables.GPOS.mergeMap = \
-otTables.BASE.mergeMap = \
-otTables.JSTF.mergeMap = \
-otTables.MATH.mergeMap = \
-{
- '*': mergeObjects,
- 'Version': max,
-}
-
-ttLib.getTableClass('GDEF').mergeMap = \
-ttLib.getTableClass('GSUB').mergeMap = \
-ttLib.getTableClass('GPOS').mergeMap = \
-ttLib.getTableClass('BASE').mergeMap = \
-ttLib.getTableClass('JSTF').mergeMap = \
-ttLib.getTableClass('MATH').mergeMap = \
-{
- 'tableTag': onlyExisting(equal), # XXX clean me up
- 'table': mergeObjects,
-}
-
-@_add_method(ttLib.getTableClass('GSUB'))
-def merge(self, m, tables):
-
- assert len(tables) == len(m.duplicateGlyphsPerFont)
- for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
- if not dups: continue
- assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB: %s" % (i + 1, dups)
- synthFeature = None
- synthLookup = None
- for script in table.table.ScriptList.ScriptRecord:
- if script.ScriptTag == 'DFLT': continue # XXX
- for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
- if langsys is None: continue # XXX Create!
- feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
- assert len(feature) <= 1
- if feature:
- feature = feature[0]
- else:
- if not synthFeature:
- synthFeature = otTables.FeatureRecord()
- synthFeature.FeatureTag = 'locl'
- f = synthFeature.Feature = otTables.Feature()
- f.FeatureParams = None
- f.LookupCount = 0
- f.LookupListIndex = []
- langsys.FeatureIndex.append(synthFeature)
- langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
- table.table.FeatureList.FeatureRecord.append(synthFeature)
- table.table.FeatureList.FeatureCount += 1
- feature = synthFeature
-
- if not synthLookup:
- subtable = otTables.SingleSubst()
- subtable.mapping = dups
- synthLookup = otTables.Lookup()
- synthLookup.LookupFlag = 0
- synthLookup.LookupType = 1
- synthLookup.SubTableCount = 1
- synthLookup.SubTable = [subtable]
- if table.table.LookupList is None:
- # mtiLib uses None as default value for LookupList,
- # while feaLib points to an empty array with count 0
- # TODO: make them do the same
- table.table.LookupList = otTables.LookupList()
- table.table.LookupList.Lookup = []
- table.table.LookupList.LookupCount = 0
- table.table.LookupList.Lookup.append(synthLookup)
- table.table.LookupList.LookupCount += 1
-
- feature.Feature.LookupListIndex[:0] = [synthLookup]
- feature.Feature.LookupCount += 1
-
- DefaultTable.merge(self, m, tables)
- return self
-
-@_add_method(otTables.SingleSubst,
- otTables.MultipleSubst,
- otTables.AlternateSubst,
- otTables.LigatureSubst,
- otTables.ReverseChainSingleSubst,
- otTables.SinglePos,
- otTables.PairPos,
- otTables.CursivePos,
- otTables.MarkBasePos,
- otTables.MarkLigPos,
- otTables.MarkMarkPos)
-def mapLookups(self, lookupMap):
- pass
-
-# Copied and trimmed down from subset.py
-@_add_method(otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
-def __merge_classify_context(self):
-
- class ContextHelper(object):
- def __init__(self, klass, Format):
- if klass.__name__.endswith('Subst'):
- Typ = 'Sub'
- Type = 'Subst'
- else:
- Typ = 'Pos'
- Type = 'Pos'
- if klass.__name__.startswith('Chain'):
- Chain = 'Chain'
- else:
- Chain = ''
- ChainTyp = Chain+Typ
-
- self.Typ = Typ
- self.Type = Type
- self.Chain = Chain
- self.ChainTyp = ChainTyp
-
- self.LookupRecord = Type+'LookupRecord'
-
- if Format == 1:
- self.Rule = ChainTyp+'Rule'
- self.RuleSet = ChainTyp+'RuleSet'
- elif Format == 2:
- self.Rule = ChainTyp+'ClassRule'
- self.RuleSet = ChainTyp+'ClassSet'
-
- if self.Format not in [1, 2, 3]:
- return None # Don't shoot the messenger; let it go
- if not hasattr(self.__class__, "_merge__ContextHelpers"):
- self.__class__._merge__ContextHelpers = {}
- if self.Format not in self.__class__._merge__ContextHelpers:
- helper = ContextHelper(self.__class__, self.Format)
- self.__class__._merge__ContextHelpers[self.Format] = helper
- return self.__class__._merge__ContextHelpers[self.Format]
-
-
-@_add_method(otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
-def mapLookups(self, lookupMap):
- c = self.__merge_classify_context()
-
- if self.Format in [1, 2]:
- for rs in getattr(self, c.RuleSet):
- if not rs: continue
- for r in getattr(rs, c.Rule):
- if not r: continue
- for ll in getattr(r, c.LookupRecord):
- if not ll: continue
- ll.LookupListIndex = lookupMap[ll.LookupListIndex]
- elif self.Format == 3:
- for ll in getattr(self, c.LookupRecord):
- if not ll: continue
- ll.LookupListIndex = lookupMap[ll.LookupListIndex]
- else:
- assert 0, "unknown format: %s" % self.Format
-
-@_add_method(otTables.ExtensionSubst,
- otTables.ExtensionPos)
-def mapLookups(self, lookupMap):
- if self.Format == 1:
- self.ExtSubTable.mapLookups(lookupMap)
- else:
- assert 0, "unknown format: %s" % self.Format
-
-@_add_method(otTables.Lookup)
-def mapLookups(self, lookupMap):
- for st in self.SubTable:
- if not st: continue
- st.mapLookups(lookupMap)
-
-@_add_method(otTables.LookupList)
-def mapLookups(self, lookupMap):
- for l in self.Lookup:
- if not l: continue
- l.mapLookups(lookupMap)
-
-@_add_method(otTables.Feature)
-def mapLookups(self, lookupMap):
- self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
-
-@_add_method(otTables.FeatureList)
-def mapLookups(self, lookupMap):
- for f in self.FeatureRecord:
- if not f or not f.Feature: continue
- f.Feature.mapLookups(lookupMap)
-
-@_add_method(otTables.DefaultLangSys,
- otTables.LangSys)
-def mapFeatures(self, featureMap):
- self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
- if self.ReqFeatureIndex != 65535:
- self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
-
-@_add_method(otTables.Script)
-def mapFeatures(self, featureMap):
- if self.DefaultLangSys:
- self.DefaultLangSys.mapFeatures(featureMap)
- for l in self.LangSysRecord:
- if not l or not l.LangSys: continue
- l.LangSys.mapFeatures(featureMap)
-
-@_add_method(otTables.ScriptList)
-def mapFeatures(self, featureMap):
- for s in self.ScriptRecord:
- if not s or not s.Script: continue
- s.Script.mapFeatures(featureMap)
-
-
-class Options(object):
-
- class UnknownOptionError(Exception):
- pass
-
- def __init__(self, **kwargs):
-
- self.verbose = False
- self.timing = False
-
- self.set(**kwargs)
-
- def set(self, **kwargs):
- for k,v in kwargs.items():
- if not hasattr(self, k):
- raise self.UnknownOptionError("Unknown option '%s'" % k)
- setattr(self, k, v)
-
- def parse_opts(self, argv, ignore_unknown=[]):
- ret = []
- opts = {}
- for a in argv:
- orig_a = a
- if not a.startswith('--'):
- ret.append(a)
- continue
- a = a[2:]
- i = a.find('=')
- op = '='
- if i == -1:
- if a.startswith("no-"):
- k = a[3:]
- v = False
- else:
- k = a
- v = True
- else:
- k = a[:i]
- if k[-1] in "-+":
- op = k[-1]+'=' # Ops is '-=' or '+=' now.
- k = k[:-1]
- v = a[i+1:]
- k = k.replace('-', '_')
- if not hasattr(self, k):
- if ignore_unknown is True or k in ignore_unknown:
- ret.append(orig_a)
- continue
- else:
- raise self.UnknownOptionError("Unknown option '%s'" % a)
-
- ov = getattr(self, k)
- if isinstance(ov, bool):
- v = bool(v)
- elif isinstance(ov, int):
- v = int(v)
- elif isinstance(ov, list):
- vv = v.split(',')
- if vv == ['']:
- vv = []
- vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
- if op == '=':
- v = vv
- elif op == '+=':
- v = ov
- v.extend(vv)
- elif op == '-=':
- v = ov
- for x in vv:
- if x in v:
- v.remove(x)
- else:
- assert 0
-
- opts[k] = v
- self.set(**opts)
-
- return ret
-
-class _AttendanceRecordingIdentityDict(object):
- """A dictionary-like object that records indices of items actually accessed
- from a list."""
-
- def __init__(self, lst):
- self.l = lst
- self.d = {id(v):i for i,v in enumerate(lst)}
- self.s = set()
-
- def __getitem__(self, v):
- self.s.add(self.d[id(v)])
- return v
-
-class _GregariousIdentityDict(object):
- """A dictionary-like object that welcomes guests without reservations and
- adds them to the end of the guest list."""
-
- def __init__(self, lst):
- self.l = lst
- self.s = set(id(v) for v in lst)
-
- def __getitem__(self, v):
- if id(v) not in self.s:
- self.s.add(id(v))
- self.l.append(v)
- return v
-
-class _NonhashableDict(object):
- """A dictionary-like object mapping objects to values."""
-
- def __init__(self, keys, values=None):
- if values is None:
- self.d = {id(v):i for i,v in enumerate(keys)}
- else:
- self.d = {id(k):v for k,v in zip(keys, values)}
-
- def __getitem__(self, k):
- return self.d[id(k)]
-
- def __setitem__(self, k, v):
- self.d[id(k)] = v
-
- def __delitem__(self, k):
- del self.d[id(k)]
-
-class Merger(object):
- """Font merger.
-
- This class merges multiple files into a single OpenType font, taking into
- account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
- cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
- all the fonts).
-
- If multiple glyphs map to the same Unicode value, and the glyphs are considered
- sufficiently different (that is, they differ in any of paths, widths, or
- height), then subsequent glyphs are renamed and a lookup in the ``locl``
- feature will be created to disambiguate them. For example, if the arguments
- are an Arabic font and a Latin font and both contain a set of parentheses,
- the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
- and a lookup will be inserted into the to ``locl`` feature (creating it if
- necessary) under the ``latn`` script to substitute ``parenleft`` with
- ``parenleft#1`` etc.
-
- Restrictions:
-
- - All fonts must currently have TrueType outlines (``glyf`` table).
- Merging fonts with CFF outlines is not supported.
- - All fonts must have the same units per em.
- - If duplicate glyph disambiguation takes place as described above then the
- fonts must have a ``GSUB`` table.
-
- Attributes:
- options: Currently unused.
- """
-
- def __init__(self, options=None):
-
- if not options:
- options = Options()
-
- self.options = options
-
- def merge(self, fontfiles):
- """Merges fonts together.
-
- Args:
- fontfiles: A list of file names to be merged
-
- Returns:
- A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
- this to write it out to an OTF file.
- """
- mega = ttLib.TTFont()
-
- #
- # Settle on a mega glyph order.
- #
- fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
- glyphOrders = [font.getGlyphOrder() for font in fonts]
- megaGlyphOrder = self._mergeGlyphOrders(glyphOrders)
- # Reload fonts and set new glyph names on them.
- # TODO Is it necessary to reload font? I think it is. At least
- # it's safer, in case tables were loaded to provide glyph names.
- fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
- for font,glyphOrder in zip(fonts, glyphOrders):
- font.setGlyphOrder(glyphOrder)
- mega.setGlyphOrder(megaGlyphOrder)
-
- for font in fonts:
- self._preMerge(font)
-
- self.fonts = fonts
- self.duplicateGlyphsPerFont = [{} for _ in fonts]
-
- allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
- allTags.remove('GlyphOrder')
-
- # Make sure we process cmap before GSUB as we have a dependency there.
- if 'GSUB' in allTags:
- allTags.remove('GSUB')
- allTags = ['GSUB'] + list(allTags)
- if 'cmap' in allTags:
- allTags.remove('cmap')
- allTags = ['cmap'] + list(allTags)
-
- for tag in allTags:
- with timer("merge '%s'" % tag):
- tables = [font.get(tag, NotImplemented) for font in fonts]
-
- log.info("Merging '%s'.", tag)
- clazz = ttLib.getTableClass(tag)
- table = clazz(tag).merge(self, tables)
- # XXX Clean this up and use: table = mergeObjects(tables)
-
- if table is not NotImplemented and table is not False:
- mega[tag] = table
- log.info("Merged '%s'.", tag)
- else:
- log.info("Dropped '%s'.", tag)
-
- del self.duplicateGlyphsPerFont
- del self.fonts
-
- self._postMerge(mega)
-
- return mega
-
- def _mergeGlyphOrders(self, glyphOrders):
- """Modifies passed-in glyphOrders to reflect new glyph names.
- Returns glyphOrder for the merged font."""
- mega = {}
- for glyphOrder in glyphOrders:
- for i,glyphName in enumerate(glyphOrder):
- if glyphName in mega:
- n = mega[glyphName]
- while (glyphName + "#" + repr(n)) in mega:
- n += 1
- mega[glyphName] = n
- glyphName += "#" + repr(n)
- glyphOrder[i] = glyphName
- mega[glyphName] = 1
- return list(mega.keys())
-
- def mergeObjects(self, returnTable, logic, tables):
- # Right now we don't use self at all. Will use in the future
- # for options and logging.
-
- allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
- for key in allKeys:
- try:
- mergeLogic = logic[key]
- except KeyError:
- try:
- mergeLogic = logic['*']
- except KeyError:
- raise Exception("Don't know how to merge key %s of class %s" %
- (key, returnTable.__class__.__name__))
- if mergeLogic is NotImplemented:
- continue
- value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
- if value is not NotImplemented:
- setattr(returnTable, key, value)
-
- return returnTable
-
- def _preMerge(self, font):
-
- # Map indices to references
-
- GDEF = font.get('GDEF')
- GSUB = font.get('GSUB')
- GPOS = font.get('GPOS')
-
- for t in [GSUB, GPOS]:
- if not t: continue
-
- if t.table.LookupList:
- lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
- t.table.LookupList.mapLookups(lookupMap)
- t.table.FeatureList.mapLookups(lookupMap)
-
- if t.table.FeatureList and t.table.ScriptList:
- featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
- t.table.ScriptList.mapFeatures(featureMap)
-
- # TODO GDEF/Lookup MarkFilteringSets
- # TODO FeatureParams nameIDs
-
- def _postMerge(self, font):
-
- # Map references back to indices
-
- GDEF = font.get('GDEF')
- GSUB = font.get('GSUB')
- GPOS = font.get('GPOS')
-
- for t in [GSUB, GPOS]:
- if not t: continue
-
- if t.table.FeatureList and t.table.ScriptList:
-
- # Collect unregistered (new) features.
- featureMap = _GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
- t.table.ScriptList.mapFeatures(featureMap)
-
- # Record used features.
- featureMap = _AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
- t.table.ScriptList.mapFeatures(featureMap)
- usedIndices = featureMap.s
-
- # Remove unused features
- t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]
-
- # Map back to indices.
- featureMap = _NonhashableDict(t.table.FeatureList.FeatureRecord)
- t.table.ScriptList.mapFeatures(featureMap)
-
- t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)
-
- if t.table.LookupList:
-
- # Collect unregistered (new) lookups.
- lookupMap = _GregariousIdentityDict(t.table.LookupList.Lookup)
- t.table.FeatureList.mapLookups(lookupMap)
- t.table.LookupList.mapLookups(lookupMap)
-
- # Record used lookups.
- lookupMap = _AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
- t.table.FeatureList.mapLookups(lookupMap)
- t.table.LookupList.mapLookups(lookupMap)
- usedIndices = lookupMap.s
-
- # Remove unused lookups
- t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]
-
- # Map back to indices.
- lookupMap = _NonhashableDict(t.table.LookupList.Lookup)
- t.table.FeatureList.mapLookups(lookupMap)
- t.table.LookupList.mapLookups(lookupMap)
-
- t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
-
- # TODO GDEF/Lookup MarkFilteringSets
- # TODO FeatureParams nameIDs
-
-
-__all__ = [
- 'Options',
- 'Merger',
- 'main'
-]
-
-@timer("make one with everything (TOTAL TIME)")
-def main(args=None):
- """Merge multiple fonts into one"""
- from fontTools import configLogger
-
- if args is None:
- args = sys.argv[1:]
-
- options = Options()
- args = options.parse_opts(args)
-
- if len(args) < 1:
- print("usage: pyftmerge font...", file=sys.stderr)
- return 1
-
- configLogger(level=logging.INFO if options.verbose else logging.WARNING)
- if options.timing:
- timer.logger.setLevel(logging.DEBUG)
- else:
- timer.logger.disabled = True
-
- merger = Merger(options=options)
- font = merger.merge(args)
- outfile = 'merged.ttf'
- with timer("compile and save font"):
- font.save(outfile)
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/Lib/fontTools/merge/__init__.py b/Lib/fontTools/merge/__init__.py
new file mode 100644
index 00000000..152bf079
--- /dev/null
+++ b/Lib/fontTools/merge/__init__.py
@@ -0,0 +1,200 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+from fontTools import ttLib
+import fontTools.merge.base
+from fontTools.merge.cmap import computeMegaGlyphOrder, computeMegaCmap, renameCFFCharStrings
+from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
+from fontTools.merge.options import Options
+import fontTools.merge.tables
+from fontTools.misc.loggingTools import Timer
+from functools import reduce
+import sys
+import logging
+
+
+log = logging.getLogger("fontTools.merge")
+timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO)
+
+
+class Merger(object):
+ """Font merger.
+
+ This class merges multiple files into a single OpenType font, taking into
+ account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
+ cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
+ all the fonts).
+
+ If multiple glyphs map to the same Unicode value, and the glyphs are considered
+ sufficiently different (that is, they differ in any of paths, widths, or
+ height), then subsequent glyphs are renamed and a lookup in the ``locl``
+ feature will be created to disambiguate them. For example, if the arguments
+ are an Arabic font and a Latin font and both contain a set of parentheses,
+ the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
+ and a lookup will be inserted into the ``locl`` feature (creating it if
+ necessary) under the ``latn`` script to substitute ``parenleft`` with
+ ``parenleft#1`` etc.
+
+ Restrictions:
+
+ - All fonts must have the same units per em.
+ - If duplicate glyph disambiguation takes place as described above then the
+ fonts must have a ``GSUB`` table.
+
+ Attributes:
+ options: Currently unused.
+ """
+
+ def __init__(self, options=None):
+
+ if not options:
+ options = Options()
+
+ self.options = options
+
+ def _openFonts(self, fontfiles):
+ fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
+ for font,fontfile in zip(fonts, fontfiles):
+ font._merger__fontfile = fontfile
+ font._merger__name = font['name'].getDebugName(4)
+ return fonts
+
+ def merge(self, fontfiles):
+ """Merges fonts together.
+
+ Args:
+ fontfiles: A list of file names to be merged
+
+ Returns:
+ A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
+ this to write it out to an OTF file.
+ """
+ #
+ # Settle on a mega glyph order.
+ #
+ fonts = self._openFonts(fontfiles)
+ glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
+ computeMegaGlyphOrder(self, glyphOrders)
+
+ # Take first input file sfntVersion
+ sfntVersion = fonts[0].sfntVersion
+
+ # Reload fonts and set new glyph names on them.
+ fonts = self._openFonts(fontfiles)
+ for font,glyphOrder in zip(fonts, glyphOrders):
+ font.setGlyphOrder(glyphOrder)
+ if 'CFF ' in font:
+ renameCFFCharStrings(self, glyphOrder, font['CFF '])
+
+ cmaps = [font['cmap'] for font in fonts]
+ self.duplicateGlyphsPerFont = [{} for _ in fonts]
+ computeMegaCmap(self, cmaps)
+
+ mega = ttLib.TTFont(sfntVersion=sfntVersion)
+ mega.setGlyphOrder(self.glyphOrder)
+
+ for font in fonts:
+ self._preMerge(font)
+
+ self.fonts = fonts
+
+ allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
+ allTags.remove('GlyphOrder')
+
+ for tag in allTags:
+ if tag in self.options.drop_tables:
+ continue
+
+ with timer("merge '%s'" % tag):
+ tables = [font.get(tag, NotImplemented) for font in fonts]
+
+ log.info("Merging '%s'.", tag)
+ clazz = ttLib.getTableClass(tag)
+ table = clazz(tag).merge(self, tables)
+ # XXX Clean this up and use: table = mergeObjects(tables)
+
+ if table is not NotImplemented and table is not False:
+ mega[tag] = table
+ log.info("Merged '%s'.", tag)
+ else:
+ log.info("Dropped '%s'.", tag)
+
+ del self.duplicateGlyphsPerFont
+ del self.fonts
+
+ self._postMerge(mega)
+
+ return mega
+
+ def mergeObjects(self, returnTable, logic, tables):
+ # Right now we don't use self at all. Will use in the future
+ # for options and logging.
+
+ allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
+ for key in allKeys:
+ try:
+ mergeLogic = logic[key]
+ except KeyError:
+ try:
+ mergeLogic = logic['*']
+ except KeyError:
+ raise Exception("Don't know how to merge key %s of class %s" %
+ (key, returnTable.__class__.__name__))
+ if mergeLogic is NotImplemented:
+ continue
+ value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
+ if value is not NotImplemented:
+ setattr(returnTable, key, value)
+
+ return returnTable
+
+ def _preMerge(self, font):
+ layoutPreMerge(font)
+
+ def _postMerge(self, font):
+ layoutPostMerge(font)
+
+
+__all__ = [
+ 'Options',
+ 'Merger',
+ 'main'
+]
+
+@timer("make one with everything (TOTAL TIME)")
+def main(args=None):
+ """Merge multiple fonts into one"""
+ from fontTools import configLogger
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = Options()
+ args = options.parse_opts(args, ignore_unknown=['output-file'])
+ outfile = 'merged.ttf'
+ fontfiles = []
+ for g in args:
+ if g.startswith('--output-file='):
+ outfile = g[14:]
+ continue
+ fontfiles.append(g)
+
+ if len(args) < 1:
+ print("usage: pyftmerge font...", file=sys.stderr)
+ return 1
+
+ configLogger(level=logging.INFO if options.verbose else logging.WARNING)
+ if options.timing:
+ timer.logger.setLevel(logging.DEBUG)
+ else:
+ timer.logger.disabled = True
+
+ merger = Merger(options=options)
+ font = merger.merge(fontfiles)
+ with timer("compile and save font"):
+ font.save(outfile)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/merge/__main__.py b/Lib/fontTools/merge/__main__.py
new file mode 100644
index 00000000..623ca7d2
--- /dev/null
+++ b/Lib/fontTools/merge/__main__.py
@@ -0,0 +1,6 @@
+import sys
+from fontTools.merge import main
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/Lib/fontTools/merge/base.py b/Lib/fontTools/merge/base.py
new file mode 100644
index 00000000..868b51a4
--- /dev/null
+++ b/Lib/fontTools/merge/base.py
@@ -0,0 +1,76 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
+import logging
+
+
+log = logging.getLogger("fontTools.merge")
+
+
+def add_method(*clazzes, **kwargs):
+ """Returns a decorator function that adds a new method to one or
+ more classes."""
+ allowDefault = kwargs.get('allowDefaultTable', False)
+ def wrapper(method):
+ done = []
+ for clazz in clazzes:
+ if clazz in done: continue # Support multiple names of a clazz
+ done.append(clazz)
+ assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
+ assert method.__name__ not in clazz.__dict__, \
+ "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
+ setattr(clazz, method.__name__, method)
+ return None
+ return wrapper
+
+def mergeObjects(lst):
+ lst = [item for item in lst if item is not NotImplemented]
+ if not lst:
+ return NotImplemented
+ lst = [item for item in lst if item is not None]
+ if not lst:
+ return None
+
+ clazz = lst[0].__class__
+ assert all(type(item) == clazz for item in lst), lst
+
+ logic = clazz.mergeMap
+ returnTable = clazz()
+ returnDict = {}
+
+ allKeys = set.union(set(), *(vars(table).keys() for table in lst))
+ for key in allKeys:
+ try:
+ mergeLogic = logic[key]
+ except KeyError:
+ try:
+ mergeLogic = logic['*']
+ except KeyError:
+ raise Exception("Don't know how to merge key %s of class %s" %
+ (key, clazz.__name__))
+ if mergeLogic is NotImplemented:
+ continue
+ value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
+ if value is not NotImplemented:
+ returnDict[key] = value
+
+ returnTable.__dict__ = returnDict
+
+ return returnTable
+
+@add_method(DefaultTable, allowDefaultTable=True)
+def merge(self, m, tables):
+ if not hasattr(self, 'mergeMap'):
+ log.info("Don't know how to merge '%s'.", self.tableTag)
+ return NotImplemented
+
+ logic = self.mergeMap
+
+ if isinstance(logic, dict):
+ return m.mergeObjects(self, self.mergeMap, tables)
+ else:
+ return logic(tables)
+
+
diff --git a/Lib/fontTools/merge/cmap.py b/Lib/fontTools/merge/cmap.py
new file mode 100644
index 00000000..7ade4ac9
--- /dev/null
+++ b/Lib/fontTools/merge/cmap.py
@@ -0,0 +1,129 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+from fontTools.merge.unicode import is_Default_Ignorable
+from fontTools.pens.recordingPen import DecomposingRecordingPen
+import logging
+
+
+log = logging.getLogger("fontTools.merge")
+
+
+def computeMegaGlyphOrder(merger, glyphOrders):
+ """Modifies passed-in glyphOrders to reflect new glyph names.
+ Stores merger.glyphOrder."""
+ megaOrder = {}
+ for glyphOrder in glyphOrders:
+ for i,glyphName in enumerate(glyphOrder):
+ if glyphName in megaOrder:
+ n = megaOrder[glyphName]
+ while (glyphName + "#" + repr(n)) in megaOrder:
+ n += 1
+ megaOrder[glyphName] = n
+ glyphName += "#" + repr(n)
+ glyphOrder[i] = glyphName
+ megaOrder[glyphName] = 1
+ merger.glyphOrder = megaOrder = list(megaOrder.keys())
+
+
+def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2,
+ advanceTolerance=.05,
+ advanceToleranceEmpty=.20):
+ pen1 = DecomposingRecordingPen(glyphSet1)
+ pen2 = DecomposingRecordingPen(glyphSet2)
+ g1 = glyphSet1[glyph1]
+ g2 = glyphSet2[glyph2]
+ g1.draw(pen1)
+ g2.draw(pen2)
+ if pen1.value != pen2.value:
+ return False
+ # Allow more width tolerance for glyphs with no ink
+ tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty
+ # TODO Warn if advances not the same but within tolerance.
+ if abs(g1.width - g2.width) > g1.width * tolerance:
+ return False
+ if hasattr(g1, 'height') and g1.height is not None:
+ if abs(g1.height - g2.height) > g1.height * tolerance:
+ return False
+ return True
+
+# Valid (format, platformID, platEncID) triplets for cmap subtables containing
+# Unicode BMP-only and Unicode Full Repertoire semantics.
+# Cf. OpenType spec for "Platform specific encodings":
+# https://docs.microsoft.com/en-us/typography/opentype/spec/name
+class _CmapUnicodePlatEncodings:
+ BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
+ FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
+
+def computeMegaCmap(merger, cmapTables):
+ """Sets merger.cmap and merger.glyphOrder."""
+
+ # TODO Handle format=14.
+ # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
+ # If there is a format 12 table for a font, ignore the format 4 table of it
+ chosenCmapTables = []
+ for fontIdx,table in enumerate(cmapTables):
+ format4 = None
+ format12 = None
+ for subtable in table.tables:
+ properties = (subtable.format, subtable.platformID, subtable.platEncID)
+ if properties in _CmapUnicodePlatEncodings.BMP:
+ format4 = subtable
+ elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
+ format12 = subtable
+ else:
+ log.warning(
+ "Dropped cmap subtable from font '%s':\t"
+ "format %2s, platformID %2s, platEncID %2s",
+ fontIdx, subtable.format, subtable.platformID, subtable.platEncID
+ )
+ if format12 is not None:
+ chosenCmapTables.append((format12, fontIdx))
+ elif format4 is not None:
+ chosenCmapTables.append((format4, fontIdx))
+
+ # Build the unicode mapping
+ merger.cmap = cmap = {}
+ fontIndexForGlyph = {}
+ glyphSets = [None for f in merger.fonts] if hasattr(merger, 'fonts') else None
+
+ for table,fontIdx in chosenCmapTables:
+ # handle duplicates
+ for uni,gid in table.cmap.items():
+ oldgid = cmap.get(uni, None)
+ if oldgid is None:
+ cmap[uni] = gid
+ fontIndexForGlyph[gid] = fontIdx
+ elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE
+ continue
+ elif oldgid != gid:
+ # Char previously mapped to oldgid, now to gid.
+ # Record, to fix up in GSUB 'locl' later.
+ if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
+ if glyphSets is not None:
+ oldFontIdx = fontIndexForGlyph[oldgid]
+ for idx in (fontIdx, oldFontIdx):
+ if glyphSets[idx] is None:
+ glyphSets[idx] = merger.fonts[idx].getGlyphSet()
+ #if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
+ # continue
+ merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
+ elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
+ # Char previously mapped to oldgid but oldgid is already remapped to a different
+ # gid, because of another Unicode character.
+ # TODO: Try harder to do something about these.
+ log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)
+
+
+def renameCFFCharStrings(merger, glyphOrder, cffTable):
+ """Rename topDictIndex charStrings based on glyphOrder."""
+ td = cffTable.cff.topDictIndex[0]
+
+ charStrings = {}
+ for i, v in enumerate(td.CharStrings.charStrings.values()):
+ glyphName = glyphOrder[i]
+ charStrings[glyphName] = v
+ td.CharStrings.charStrings = charStrings
+
+ td.charset = list(glyphOrder)
diff --git a/Lib/fontTools/merge/layout.py b/Lib/fontTools/merge/layout.py
new file mode 100644
index 00000000..4bf01c37
--- /dev/null
+++ b/Lib/fontTools/merge/layout.py
@@ -0,0 +1,466 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+from fontTools import ttLib
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
+from fontTools.ttLib.tables import otTables
+from fontTools.merge.base import add_method, mergeObjects
+from fontTools.merge.util import *
+import logging
+
+
+log = logging.getLogger("fontTools.merge")
+
+
def mergeLookupLists(lst):
    # TODO Do smarter merge.
    # For now simply concatenate all per-font lookup-index lists, in order.
    merged = []
    for indices in lst:
        merged.extend(indices)
    return merged
+
def mergeFeatures(lst):
    """Merge several Feature tables into a single one.

    Lookup-index lists of all non-empty inputs are concatenated;
    FeatureParams are dropped (set to None).
    """
    assert lst
    merged = otTables.Feature()
    merged.FeatureParams = None
    merged.LookupListIndex = mergeLookupLists(
        [feature.LookupListIndex for feature in lst if feature.LookupListIndex]
    )
    merged.LookupCount = len(merged.LookupListIndex)
    return merged
+
def mergeFeatureLists(lst):
    """Group FeatureRecords by tag, merge same-tag Features, sort by tag."""
    byTag = {}
    for records in lst:
        for record in records:
            byTag.setdefault(record.FeatureTag, []).append(record.Feature)
    merged = []
    for tag in sorted(byTag):
        record = otTables.FeatureRecord()
        record.FeatureTag = tag
        record.Feature = mergeFeatures(byTag[tag])
        merged.append(record)
    return merged
+
def mergeLangSyses(lst):
    """Merge several LangSys tables; feature lists are merged per tag."""
    assert lst

    # TODO Support merging ReqFeatureIndex
    assert all(langSys.ReqFeatureIndex == 0xFFFF for langSys in lst)

    merged = otTables.LangSys()
    merged.LookupOrder = None
    merged.ReqFeatureIndex = 0xFFFF  # "no required feature"
    merged.FeatureIndex = mergeFeatureLists(
        [langSys.FeatureIndex for langSys in lst if langSys.FeatureIndex]
    )
    merged.FeatureCount = len(merged.FeatureIndex)
    return merged
+
def mergeScripts(lst):
    """Merge several Script tables into one.

    LangSys entries are grouped by tag and merged; default LangSys entries
    (if any) are merged as well.  A single input is returned unchanged.
    """
    assert lst
    if len(lst) == 1:
        return lst[0]

    byTag = {}
    for script in lst:
        for record in script.LangSysRecord:
            byTag.setdefault(record.LangSysTag, []).append(record.LangSys)

    records = []
    for tag, langSysList in sorted(byTag.items()):
        record = otTables.LangSysRecord()
        record.LangSys = mergeLangSyses(langSysList)
        record.LangSysTag = tag
        records.append(record)

    merged = otTables.Script()
    merged.LangSysRecord = records
    merged.LangSysCount = len(records)
    defaults = [script.DefaultLangSys for script in lst if script.DefaultLangSys]
    merged.DefaultLangSys = mergeLangSyses(defaults) if defaults else None
    return merged
+
def mergeScriptRecords(lst):
    """Group ScriptRecords by tag, merge same-tag Scripts, sort by tag."""
    byTag = {}
    for records in lst:
        for record in records:
            byTag.setdefault(record.ScriptTag, []).append(record.Script)
    merged = []
    for tag in sorted(byTag):
        record = otTables.ScriptRecord()
        record.ScriptTag = tag
        record.Script = mergeScripts(byTag[tag])
        merged.append(record)
    return merged
+
# Per-field merge strategies for OpenType layout structures.  Each mergeMap
# maps a field name to a function that merges that field's values across
# fonts; '*' is the fallback for fields not listed explicitly.
otTables.ScriptList.mergeMap = {
    'ScriptCount': lambda lst: None, # TODO
    'ScriptRecord': mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    'BaseScriptCount': lambda lst: None, # TODO
    # TODO: Merge duplicate entries
    'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
}

otTables.FeatureList.mergeMap = {
    'FeatureCount': sum,
    'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}

otTables.LookupList.mergeMap = {
    'LookupCount': sum,
    'Lookup': sumLists,
}

otTables.Coverage.mergeMap = {
    'Format': min,
    'glyphs': sumLists,
}

otTables.ClassDef.mergeMap = {
    'Format': min,
    'classDefs': sumDicts,
}

otTables.LigCaretList.mergeMap = {
    'Coverage': mergeObjects,
    'LigGlyphCount': sum,
    'LigGlyph': sumLists,
}

otTables.AttachList.mergeMap = {
    'Coverage': mergeObjects,
    'GlyphCount': sum,
    'AttachPoint': sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    'MarkSetTableFormat': equal,
    'MarkSetCount': sum,
    'Coverage': sumLists,
}

otTables.Axis.mergeMap = {
    '*': mergeObjects,
}

# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    'BaseTagCount': sum,
    'BaselineTag': sumLists,
}

# Top-level layout tables: recursively merge members, keep the max version.
otTables.GDEF.mergeMap = \
otTables.GSUB.mergeMap = \
otTables.GPOS.mergeMap = \
otTables.BASE.mergeMap = \
otTables.JSTF.mergeMap = \
otTables.MATH.mergeMap = \
{
    '*': mergeObjects,
    'Version': max,
}

# ttLib table wrappers around the otTables above.
ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
    'tableTag': onlyExisting(equal), # XXX clean me up
    'table': mergeObjects,
}
+
@add_method(ttLib.getTableClass('GSUB'))
def merge(self, m, tables):
    """Merge GSUB tables, resolving duplicate-glyph mappings.

    For every font that mapped some codepoint to a glyph already claimed by
    an earlier font (recorded in m.duplicateGlyphsPerFont), a synthesized
    'locl' feature with a single-substitution lookup is injected so that the
    old glyph is substituted by the new one for that font's scripts.
    """
    assert len(tables) == len(m.duplicateGlyphsPerFont)
    for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
        if not dups: continue
        if table is None or table is NotImplemented:
            log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups)
            continue

        synthFeature = None
        synthLookup = None
        for script in table.table.ScriptList.ScriptRecord:
            if script.ScriptTag == 'DFLT': continue # XXX
            for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
                if langsys is None: continue # XXX Create!
                # Reuse an existing 'locl' feature if the langsys has one.
                feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
                assert len(feature) <= 1
                if feature:
                    feature = feature[0]
                else:
                    # Create the synthesized 'locl' feature once per font,
                    # then attach it to every langsys that lacks one.
                    if not synthFeature:
                        synthFeature = otTables.FeatureRecord()
                        synthFeature.FeatureTag = 'locl'
                        f = synthFeature.Feature = otTables.Feature()
                        f.FeatureParams = None
                        f.LookupCount = 0
                        f.LookupListIndex = []
                        table.table.FeatureList.FeatureRecord.append(synthFeature)
                        table.table.FeatureList.FeatureCount += 1
                    feature = synthFeature
                    langsys.FeatureIndex.append(feature)
                    langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)

                # One synthesized single-substitution lookup per font,
                # mapping each duplicated old glyph to its new glyph.
                if not synthLookup:
                    subtable = otTables.SingleSubst()
                    subtable.mapping = dups
                    synthLookup = otTables.Lookup()
                    synthLookup.LookupFlag = 0
                    synthLookup.LookupType = 1
                    synthLookup.SubTableCount = 1
                    synthLookup.SubTable = [subtable]
                    if table.table.LookupList is None:
                        # mtiLib uses None as default value for LookupList,
                        # while feaLib points to an empty array with count 0
                        # TODO: make them do the same
                        table.table.LookupList = otTables.LookupList()
                        table.table.LookupList.Lookup = []
                        table.table.LookupList.LookupCount = 0
                    table.table.LookupList.Lookup.append(synthLookup)
                    table.table.LookupList.LookupCount += 1

                # Prepend the lookup to the feature unless already there.
                if feature.Feature.LookupListIndex[:1] != [synthLookup]:
                    feature.Feature.LookupListIndex[:0] = [synthLookup]
                    feature.Feature.LookupCount += 1

    DefaultTable.merge(self, m, tables)
    return self
+
@add_method(otTables.SingleSubst,
            otTables.MultipleSubst,
            otTables.AlternateSubst,
            otTables.LigatureSubst,
            otTables.ReverseChainSingleSubst,
            otTables.SinglePos,
            otTables.PairPos,
            otTables.CursivePos,
            otTables.MarkBasePos,
            otTables.MarkLigPos,
            otTables.MarkMarkPos)
def mapLookups(self, lookupMap):
    # These subtable types carry no references to other lookups,
    # so there is nothing to remap.
    pass
+
# Copied and trimmed down from subset.py
@add_method(otTables.ContextSubst,
            otTables.ChainContextSubst,
            otTables.ContextPos,
            otTables.ChainContextPos)
def __merge_classify_context(self):
    """Return a (cached) helper describing this contextual subtable's
    attribute names (rule/ruleset/lookup-record), which differ between
    GSUB/GPOS, Chain/non-Chain, and Format 1/2/3."""

    class ContextHelper(object):
        def __init__(self, klass, Format):
            # Derive attribute-name fragments from the class name.
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
            else:
                Chain = ''
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            self.LookupRecord = Type+'LookupRecord'

            # Format 3 has no rule sets; lookup records hang off self.
            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleSet = ChainTyp+'RuleSet'
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleSet = ChainTyp+'ClassSet'

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go
    # Cache one helper per (class, format) on the class itself.
    if not hasattr(self.__class__, "_merge__ContextHelpers"):
        self.__class__._merge__ContextHelpers = {}
    if self.Format not in self.__class__._merge__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__._merge__ContextHelpers[self.Format] = helper
    return self.__class__._merge__ContextHelpers[self.Format]
+
+
@add_method(otTables.ContextSubst,
            otTables.ChainContextSubst,
            otTables.ContextPos,
            otTables.ChainContextPos)
def mapLookups(self, lookupMap):
    # Remap the lookup indices stored in this contextual subtable's
    # lookup records, using the attribute names from the helper.
    c = self.__merge_classify_context()

    if self.Format in [1, 2]:
        # Formats 1/2: lookup records live under rule sets -> rules.
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        # Format 3: lookup records hang directly off the subtable.
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format
+
@add_method(otTables.ExtensionSubst,
            otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    """Delegate lookup remapping to the wrapped extension subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    else:
        self.ExtSubTable.mapLookups(lookupMap)
+
@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    """Remap lookup indices inside every non-empty subtable of this lookup."""
    for subtable in self.SubTable:
        if subtable:
            subtable.mapLookups(lookupMap)
+
@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    """Remap lookup indices inside every non-empty lookup in the list."""
    for lookup in self.Lookup:
        if lookup:
            lookup.mapLookups(lookupMap)
+
@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
    # LookupFlag bit 0x0010 (UseMarkFilteringSet) signals that the
    # MarkFilteringSet field is present and needs renumbering.
    if self.LookupFlag & 0x0010:
        self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
+
@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
    """Renumber mark-filtering sets in every non-empty lookup."""
    for lookup in self.Lookup:
        if lookup:
            lookup.mapMarkFilteringSets(markFilteringSetMap)
+
@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    """Translate this feature's lookup indices through lookupMap."""
    remapped = []
    for index in self.LookupListIndex:
        remapped.append(lookupMap[index])
    self.LookupListIndex = remapped
+
@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    """Remap lookups for every record that actually carries a Feature."""
    for record in self.FeatureRecord:
        if record and record.Feature:
            record.Feature.mapLookups(lookupMap)
+
@add_method(otTables.DefaultLangSys,
            otTables.LangSys)
def mapFeatures(self, featureMap):
    # Translate all feature indices through featureMap.
    self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
    # 65535 (0xFFFF) means "no required feature"; leave that sentinel alone.
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
+
@add_method(otTables.Script)
def mapFeatures(self, featureMap):
    """Remap features in the default LangSys and in every LangSysRecord."""
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for record in self.LangSysRecord:
        if record and record.LangSys:
            record.LangSys.mapFeatures(featureMap)
+
@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    """Remap features for every record that actually carries a Script."""
    for record in self.ScriptRecord:
        if record and record.Script:
            record.Script.mapFeatures(featureMap)
+
def layoutPreMerge(font):
    """Prepare a font's layout tables for merging.

    Replaces numeric lookup/feature/mark-set indices with direct object
    references, so that after tables are concatenated the references stay
    valid; layoutPostMerge converts them back to indices.
    """
    # Map indices to references

    GDEF = font.get('GDEF')
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')

    for t in [GSUB, GPOS]:
        if not t: continue

        if t.table.LookupList:
            lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
            t.table.LookupList.mapLookups(lookupMap)
            t.table.FeatureList.mapLookups(lookupMap)

            # Mark-filtering sets live in GDEF (version 1.2+).
            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)}
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

        if t.table.FeatureList and t.table.ScriptList:
            featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
            t.table.ScriptList.mapFeatures(featureMap)

    # TODO FeatureParams nameIDs
+
def layoutPostMerge(font):
    """Inverse of layoutPreMerge: convert object references back to indices.

    Also registers newly synthesized features/lookups, prunes unreferenced
    ones, and fixes up the corresponding counts.
    """
    # Map references back to indices

    GDEF = font.get('GDEF')
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')

    for t in [GSUB, GPOS]:
        if not t: continue

        if t.table.FeatureList and t.table.ScriptList:

            # Collect unregistered (new) features.
            featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            # Record used features.
            featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)
            usedIndices = featureMap.s

            # Remove unused features
            t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]

            # Map back to indices.
            featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)

        if t.table.LookupList:

            # Collect unregistered (new) lookups.
            lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            # Record used lookups.
            lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            usedIndices = lookupMap.s

            # Remove unused lookups
            t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]

            # Map back to indices.
            lookupMap = NonhashableDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)

            # Renumber mark-filtering sets against GDEF (version 1.2+).
            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage)
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

    # TODO FeatureParams nameIDs
diff --git a/Lib/fontTools/merge/options.py b/Lib/fontTools/merge/options.py
new file mode 100644
index 00000000..02dcf4b7
--- /dev/null
+++ b/Lib/fontTools/merge/options.py
@@ -0,0 +1,85 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+
class Options(object):
    """Container for merger options.

    Options are plain attributes; unknown names raise UnknownOptionError.
    parse_opts() consumes ``--name[=value]`` style command-line arguments
    and returns the non-option arguments.
    """

    class UnknownOptionError(Exception):
        pass

    def __init__(self, **kwargs):

        self.verbose = False
        self.timing = False
        self.drop_tables = []

        self.set(**kwargs)

    def set(self, **kwargs):
        """Set known options from keyword arguments; raise on unknown names."""
        for k,v in kwargs.items():
            if not hasattr(self, k):
                raise self.UnknownOptionError("Unknown option '%s'" % k)
            setattr(self, k, v)

    def parse_opts(self, argv, ignore_unknown=()):
        """Parse ``--option[=value]`` arguments from argv.

        Supports ``--no-flag`` to clear booleans, and ``--name+=v``/
        ``--name-=v`` to extend/remove from list options.  Values are
        coerced to the type of the option's current value.  Unknown options
        raise UnknownOptionError unless listed in ignore_unknown (or
        ignore_unknown is True), in which case they are passed through.
        Returns the list of non-option (and passed-through) arguments.
        """
        # NOTE: default is an immutable tuple (was a mutable list, a classic
        # shared-default pitfall); only membership is tested, so () is safe.
        ret = []
        opts = {}
        for a in argv:
            orig_a = a
            if not a.startswith('--'):
                ret.append(a)
                continue
            a = a[2:]
            i = a.find('=')
            op = '='
            if i == -1:
                if a.startswith("no-"):
                    k = a[3:]
                    v = False
                else:
                    k = a
                    v = True
            else:
                k = a[:i]
                if k[-1] in "-+":
                    op = k[-1]+'='  # Ops is '-=' or '+=' now.
                    k = k[:-1]
                v = a[i+1:]
            ok = k
            k = k.replace('-', '_')
            if not hasattr(self, k):
                if ignore_unknown is True or ok in ignore_unknown:
                    ret.append(orig_a)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % a)

            ov = getattr(self, k)
            # Coerce the string value to the option's current type.
            if isinstance(ov, bool):
                v = bool(v)
            elif isinstance(ov, int):
                v = int(v)
            elif isinstance(ov, list):
                vv = v.split(',')
                if vv == ['']:
                    vv = []
                # Items that look numeric are parsed as ints (base 0).
                vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
                if op == '=':
                    v = vv
                elif op == '+=':
                    v = ov
                    v.extend(vv)
                elif op == '-=':
                    v = ov
                    for x in vv:
                        if x in v:
                            v.remove(x)
                else:
                    assert 0

            opts[k] = v
        self.set(**opts)

        return ret
+
diff --git a/Lib/fontTools/merge/tables.py b/Lib/fontTools/merge/tables.py
new file mode 100644
index 00000000..b266f7a9
--- /dev/null
+++ b/Lib/fontTools/merge/tables.py
@@ -0,0 +1,311 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+from fontTools import ttLib, cffLib
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
+from fontTools.merge.base import add_method, mergeObjects
+from fontTools.merge.cmap import computeMegaCmap
+from fontTools.merge.util import *
+import logging
+
+
+log = logging.getLogger("fontTools.merge")
+
+
# Per-field merge strategies for font-wide tables.
ttLib.getTableClass('maxp').mergeMap = {
    '*': max,
    'tableTag': equal,
    'tableVersion': equal,
    'numGlyphs': sum,
    'maxStorage': first,
    'maxFunctionDefs': first,
    'maxInstructionDefs': first,
    # TODO When we correctly merge hinting data, update these values:
    # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}

# Bit-by-bit merge policy for head.flags (see mergeBits in merge.util).
headFlagsMergeBitMap = {
    'size': 16,
    '*': bitwise_or,
    1: bitwise_and, # Baseline at y = 0
    2: bitwise_and, # lsb at x = 0
    3: bitwise_and, # Force ppem to integer values. FIXME?
    5: bitwise_and, # Font is vertical
    6: lambda bit: 0, # Always set to zero
    11: bitwise_and, # Font data is 'lossless'
    13: bitwise_and, # Optimized for ClearType
    14: bitwise_and, # Last resort font. FIXME? equal or first may be better
    15: lambda bit: 0, # Always set to zero
}

ttLib.getTableClass('head').mergeMap = {
    'tableTag': equal,
    'tableVersion': max,
    'fontRevision': max,
    'checkSumAdjustment': lambda lst: 0, # We need *something* here
    'magicNumber': equal,
    'flags': mergeBits(headFlagsMergeBitMap),
    'unitsPerEm': equal,
    'created': current_time,
    'modified': current_time,
    'xMin': min,
    'yMin': min,
    'xMax': max,
    'yMax': max,
    'macStyle': first,
    'lowestRecPPEM': max,
    'fontDirectionHint': lambda lst: 2,
    'indexToLocFormat': first,
    'glyphDataFormat': equal,
}
+
ttLib.getTableClass('hhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceWidthMax': max,
    'minLeftSideBearing': min,
    'minRightSideBearing': min,
    'xMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfHMetrics': recalculate,
}

ttLib.getTableClass('vhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceHeightMax': max,
    'minTopSideBearing': min,
    'minBottomSideBearing': min,
    'yMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfVMetrics': recalculate,
}

# Bit-by-bit policy for OS/2.fsType (embedding permissions); used by
# mergeOs2FsType below to pick the least-restrictive combination.
os2FsTypeMergeBitMap = {
    'size': 16,
    '*': lambda bit: 0,
    1: bitwise_or, # no embedding permitted
    2: bitwise_and, # allow previewing and printing documents
    3: bitwise_and, # allow editing documents
    8: bitwise_or, # no subsetting permitted
    9: bitwise_or, # no embedding of outlines permitted
}
+
def mergeOs2FsType(lst):
    """Merge OS/2.fsType values, keeping the least restrictive permissions.

    Each input is first normalized (bit 1 cleared when preview/edit bits are
    set; 0 expanded to explicit preview+edit bits), then the bits are merged
    per os2FsTypeMergeBitMap, and finally preview/edit bits are cleared again
    if any font forbids embedding.
    """
    lst = list(lst)
    if all(item == 0 for item in lst):
        return 0

    # Compute least restrictive logic for each fsType value
    for i in range(len(lst)):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if lst[i] & 0x000C:
            lst[i] &= ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif lst[i] & 0x0008:
            lst[i] |= 0x0004
        # set bits 2 and 3 if everything is allowed
        elif lst[i] == 0:
            lst[i] = 0x000C

    fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if fsType & 0x0002:
        fsType &= ~0x000C
    return fsType
+
+
# OS/2: mostly keep the first font's values; ranges are unioned, metrics
# take the extreme, and version-dependent fields only merge when present.
ttLib.getTableClass('OS/2').mergeMap = {
    '*': first,
    'tableTag': equal,
    'version': max,
    'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this
    'fsType': mergeOs2FsType, # Will be overwritten
    'panose': first, # FIXME: should really be the first Latin font
    'ulUnicodeRange1': bitwise_or,
    'ulUnicodeRange2': bitwise_or,
    'ulUnicodeRange3': bitwise_or,
    'ulUnicodeRange4': bitwise_or,
    'fsFirstCharIndex': min,
    'fsLastCharIndex': max,
    'sTypoAscender': max,
    'sTypoDescender': min,
    'sTypoLineGap': max,
    'usWinAscent': max,
    'usWinDescent': max,
    # Version 1
    'ulCodePageRange1': onlyExisting(bitwise_or),
    'ulCodePageRange2': onlyExisting(bitwise_or),
    # Version 2, 3, 4
    'sxHeight': onlyExisting(max),
    'sCapHeight': onlyExisting(max),
    'usDefaultChar': onlyExisting(first),
    'usBreakChar': onlyExisting(first),
    'usMaxContext': onlyExisting(max),
    # version 5
    'usLowerOpticalPointSize': onlyExisting(min),
    'usUpperOpticalPointSize': onlyExisting(max),
}
+
@add_method(ttLib.getTableClass('OS/2'))
def merge(self, m, tables):
    """Merge OS/2 tables, then apply version-dependent fsType fixups."""
    DefaultTable.merge(self, m, tables)
    if self.version < 2:
        # bits 8 and 9 are reserved and should be set to zero
        self.fsType &= ~0x0300
    if self.version >= 3:
        # Only one of bits 1, 2, and 3 may be set. We already take
        # care of bit 1 implications in mergeOs2FsType. So unset
        # bit 2 if bit 3 is already set.
        if self.fsType & 0x0008:
            self.fsType &= ~0x0004
    return self
+
ttLib.getTableClass('post').mergeMap = {
    '*': first,
    'tableTag': equal,
    'formatType': max,
    'isFixedPitch': min,
    'minMemType42': max,
    'maxMemType42': lambda lst: 0,
    'minMemType1': max,
    'maxMemType1': lambda lst: 0,
    'mapping': onlyExisting(sumDicts),
    'extraNames': lambda lst: [],
}

# Horizontal and vertical metrics: just union the per-glyph dicts.
ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
    'tableTag': equal,
    'metrics': sumDicts,
}

ttLib.getTableClass('name').mergeMap = {
    'tableTag': equal,
    'names': first, # FIXME? Does mixing name records make sense?
}

# loca is recomputed from the merged glyf data on compile.
ttLib.getTableClass('loca').mergeMap = {
    '*': recalculate,
    'tableTag': equal,
}

ttLib.getTableClass('glyf').mergeMap = {
    'tableTag': equal,
    'glyphs': sumDicts,
    'glyphOrder': sumLists,
}
+
@add_method(ttLib.getTableClass('glyf'))
def merge(self, m, tables):
    """Merge 'glyf' tables; strips hints from all but the first font."""
    for i,table in enumerate(tables):
        for g in table.glyphs.values():
            if i:
                # Drop hints for all but first font, since
                # we don't map functions / CVT values.
                g.removeHinting()
            # Expand composite glyphs to load their
            # composite glyph names.
            if g.isComposite():
                g.expand(table)
    return DefaultTable.merge(self, m, tables)
+
# Whole-table mergeMap callables: keep the first font's table unchanged
# (matches glyf.merge above, which keeps only the first font's hints).
ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
+
@add_method(ttLib.getTableClass('CFF '))
def merge(self, m, tables):
    """Merge CFF tables into the first one.

    All fonts are desubroutinized first; charstrings, charsets and
    glyph-name strings of the subsequent fonts are appended to the first
    font's. CID-keyed (FDSelect) fonts are not supported.
    """
    if any(hasattr(table, "FDSelect") for table in tables):
        raise NotImplementedError(
            "Merging CID-keyed CFF tables is not supported yet"
        )

    for table in tables:
        table.cff.desubroutinize()

    newcff = tables[0]
    newfont = newcff.cff[0]
    private = newfont.Private
    storedNamesStrings = []
    glyphOrderStrings = []
    glyphOrder = set(newfont.getGlyphOrder())

    # Partition the first font's string index into glyph names and
    # other stored strings, so glyph names can be regrouped up front.
    for name in newfont.strings.strings:
        if name not in glyphOrder:
            storedNamesStrings.append(name)
        else:
            glyphOrderStrings.append(name)

    chrset = list(newfont.charset)
    newcs = newfont.CharStrings
    log.debug("FONT 0 CharStrings: %d.", len(newcs))

    for i, table in enumerate(tables[1:], start=1):
        font = table.cff[0]
        # All merged fonts share the first font's Private dict.
        font.Private = private
        fontGlyphOrder = set(font.getGlyphOrder())
        for name in font.strings.strings:
            if name in fontGlyphOrder:
                glyphOrderStrings.append(name)
        cs = font.CharStrings
        log.debug("Font %d CharStrings: %d.", i, len(cs))
        chrset.extend(font.charset)
        if newcs.charStringsAreIndexed:
            # Use a distinct loop variable: the outer `i` is the font
            # number used for logging and must not be clobbered.
            for newIndex, name in enumerate(cs.charStrings, start=len(newcs)):
                newcs.charStrings[name] = newIndex
                newcs.charStringsIndex.items.append(None)
        for name in cs.charStrings:
            newcs[name] = cs[name]

    newfont.charset = chrset
    newfont.numGlyphs = len(chrset)
    newfont.strings.strings = glyphOrderStrings + storedNamesStrings

    return newcff
+
@add_method(ttLib.getTableClass('cmap'))
def merge(self, m, tables):
    """Rebuild the cmap from the merged unicode mapping.

    Emits a Windows format-4 subtable (3,1) for the BMP, and additionally a
    format-12 subtable (3,10) when any codepoint lies outside the BMP.
    """
    # TODO Handle format=14.
    if not hasattr(m, 'cmap'):
        computeMegaCmap(m, tables)
    cmap = m.cmap

    cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF}
    self.tables = []
    module = ttLib.getTableModule('cmap')
    if len(cmapBmpOnly) != len(cmap):
        # format-12 required.
        cmapTable = module.cmap_classes[12](12)
        cmapTable.platformID = 3
        cmapTable.platEncID = 10
        cmapTable.language = 0
        cmapTable.cmap = cmap
        self.tables.append(cmapTable)
    # always create format-4
    cmapTable = module.cmap_classes[4](4)
    cmapTable.platformID = 3
    cmapTable.platEncID = 1
    cmapTable.language = 0
    cmapTable.cmap = cmapBmpOnly
    # ordered by platform then encoding
    self.tables.insert(0, cmapTable)
    self.tableVersion = 0
    self.numSubTables = len(self.tables)
    return self
diff --git a/Lib/fontTools/merge/unicode.py b/Lib/fontTools/merge/unicode.py
new file mode 100644
index 00000000..f91baee8
--- /dev/null
+++ b/Lib/fontTools/merge/unicode.py
@@ -0,0 +1,65 @@
+# Copyright 2021 Behdad Esfahbod. All Rights Reserved.
+
def is_Default_Ignorable(u):
    """Return True if codepoint *u* is a Default_Ignorable_Code_Point.

    Derived from Unicode 14.0 DerivedCoreProperties.txt; see
    http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point

    TODO Move me to unicodedata module and autogenerate.
    """
    if u in (
        0x00AD,   # Cf SOFT HYPHEN
        0x034F,   # Mn COMBINING GRAPHEME JOINER
        0x061C,   # Cf ARABIC LETTER MARK
        0x180E,   # Cf MONGOLIAN VOWEL SEPARATOR
        0x180F,   # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
        0x2065,   # Cn <reserved-2065>
        0x3164,   # Lo HANGUL FILLER
        0xFEFF,   # Cf ZERO WIDTH NO-BREAK SPACE
        0xFFA0,   # Lo HALFWIDTH HANGUL FILLER
        0xE0000,  # Cn <reserved-E0000>
        0xE0001,  # Cf LANGUAGE TAG
    ):
        return True
    return any(
        lo <= u <= hi
        for lo, hi in (
            (0x115F, 0x1160),    # Lo HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
            (0x17B4, 0x17B5),    # Mn KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
            (0x180B, 0x180D),    # Mn MONGOLIAN FREE VARIATION SELECTOR ONE..THREE
            (0x200B, 0x200F),    # Cf ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
            (0x202A, 0x202E),    # Cf LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
            (0x2060, 0x2064),    # Cf WORD JOINER..INVISIBLE PLUS
            (0x2066, 0x206F),    # Cf LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
            (0xFE00, 0xFE0F),    # Mn VARIATION SELECTOR-1..VARIATION SELECTOR-16
            (0xFFF0, 0xFFF8),    # Cn <reserved-FFF0>..<reserved-FFF8>
            (0x1BCA0, 0x1BCA3),  # Cf SHORTHAND FORMAT LETTER OVERLAP..UP STEP
            (0x1D173, 0x1D17A),  # Cf MUSICAL SYMBOL BEGIN BEAM..END PHRASE
            (0xE0002, 0xE001F),  # Cn <reserved-E0002>..<reserved-E001F>
            (0xE0020, 0xE007F),  # Cf TAG SPACE..CANCEL TAG
            (0xE0080, 0xE00FF),  # Cn <reserved-E0080>..<reserved-E00FF>
            (0xE0100, 0xE01EF),  # Mn VARIATION SELECTOR-17..VARIATION SELECTOR-256
            (0xE01F0, 0xE0FFF),  # Cn <reserved-E01F0>..<reserved-E0FFF>
        )
    )
diff --git a/Lib/fontTools/merge/util.py b/Lib/fontTools/merge/util.py
new file mode 100644
index 00000000..66cea4d5
--- /dev/null
+++ b/Lib/fontTools/merge/util.py
@@ -0,0 +1,131 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+from fontTools.misc.timeTools import timestampNow
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
+from functools import reduce
+import operator
+import logging
+
+
+log = logging.getLogger("fontTools.merge")
+
+
+# General utility functions for merging values from different fonts
+
def equal(lst):
    """Merger that insists all values are identical and returns that value."""
    values = list(lst)
    it = iter(values)
    reference = next(it)
    for value in it:
        assert value == reference, "Expected all items to be equal: %s" % values
    return reference
+
def first(lst):
    """Merger that simply keeps the first font's value."""
    iterator = iter(lst)
    return next(iterator)
+
def recalculate(lst):
    # Sentinel merger: the field is not merged from the inputs but
    # recalculated later (e.g. numberOfHMetrics, loca offsets).
    return NotImplemented
+
def current_time(lst):
    # Merger for timestamp fields ('created'/'modified'): the merged
    # font gets a fresh timestamp instead of any input value.
    return timestampNow()
+
def bitwise_and(lst):
    """Merger that ANDs all values together."""
    return reduce(lambda acc, value: acc & value, lst)
+
def bitwise_or(lst):
    """Merger that ORs all values together."""
    return reduce(lambda acc, value: acc | value, lst)
+
def avg_int(lst):
    """Merger that returns the floor of the average of the values."""
    values = list(lst)
    total = 0
    for value in values:
        total += value
    return total // len(values)
+
def onlyExisting(func):
    """Wrap *func* so it only sees values that are not NotImplemented.

    If every value is NotImplemented (i.e. the field exists in no font),
    the wrapper itself returns NotImplemented.
    """

    def wrapper(lst):
        present = []
        for item in lst:
            if item is not NotImplemented:
                present.append(item)
        if not present:
            return NotImplemented
        return func(present)

    return wrapper
+
def sumLists(lst):
    """Merger that concatenates all the lists, in font order."""
    return [element for sub in lst for element in sub]
+
def sumDicts(lst):
    """Merger that unions the dicts; later fonts win on duplicate keys."""
    merged = {}
    for mapping in lst:
        for key, value in mapping.items():
            merged[key] = value
    return merged
+
def mergeBits(bitmap):
    """Return a merger that combines integers bit by bit.

    *bitmap* maps a bit number to its merge function (called with an
    iterable of booleans); 'size' gives the field width and '*' is the
    fallback for bits without an explicit entry.
    """

    def wrapper(lst):
        values = list(lst)
        merged = 0
        for bitNumber in range(bitmap['size']):
            mergeLogic = bitmap.get(bitNumber, bitmap.get('*'))
            if mergeLogic is None:
                raise Exception("Don't know how to merge bit %s" % bitNumber)
            mask = 1 << bitNumber
            mergedBit = mergeLogic(bool(value & mask) for value in values)
            merged |= mergedBit << bitNumber
        return merged

    return wrapper
+
+
class AttendanceRecordingIdentityDict(object):
    """A dictionary-like object that records indices of items actually accessed
    from a list."""

    def __init__(self, lst):
        # l: the backing list; d: object identity -> original index;
        # s: set of indices that have been looked up.
        self.l = lst
        self.d = {}
        for index, value in enumerate(lst):
            self.d[id(value)] = index
        self.s = set()

    def __getitem__(self, v):
        # Mark the item's original index as "seen", then return it unchanged.
        self.s.add(self.d[id(v)])
        return v
+
class GregariousIdentityDict(object):
    """A dictionary-like object that welcomes guests without reservations and
    adds them to the end of the guest list."""

    def __init__(self, lst):
        # l: the backing list; s: identities of objects already in it.
        self.l = lst
        self.s = {id(value) for value in lst}

    def __getitem__(self, v):
        # Unknown objects are appended to the list and remembered.
        if id(v) in self.s:
            return v
        self.s.add(id(v))
        self.l.append(v)
        return v
+
class NonhashableDict(object):
    """A dictionary-like object mapping (possibly unhashable) objects to
    values, keyed by object identity."""

    def __init__(self, keys, values=None):
        self.d = {}
        if values is None:
            # Default: map each key to its position within *keys*.
            for index, key in enumerate(keys):
                self.d[id(key)] = index
        else:
            for key, value in zip(keys, values):
                self.d[id(key)] = value

    def __getitem__(self, k):
        return self.d[id(k)]

    def __setitem__(self, k, v):
        self.d[id(k)] = v

    def __delitem__(self, k):
        del self.d[id(k)]
diff --git a/Lib/fontTools/misc/arrayTools.py b/Lib/fontTools/misc/arrayTools.py
index c20a9eda..01ccbe82 100644
--- a/Lib/fontTools/misc/arrayTools.py
+++ b/Lib/fontTools/misc/arrayTools.py
@@ -17,7 +17,7 @@ def calcBounds(array):
Returns:
A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``.
"""
- if len(array) == 0:
+ if not array:
return 0, 0, 0, 0
xs = [x for x, y in array]
ys = [y for x, y in array]
diff --git a/Lib/fontTools/misc/bezierTools.py b/Lib/fontTools/misc/bezierTools.py
index 2cf2640c..25e5c548 100644
--- a/Lib/fontTools/misc/bezierTools.py
+++ b/Lib/fontTools/misc/bezierTools.py
@@ -879,12 +879,14 @@ def _line_t_of_pt(s, e, pt):
sx, sy = s
ex, ey = e
px, py = pt
- if not math.isclose(sx, ex):
+ if abs(sx - ex) < epsilon and abs(sy - ey) < epsilon:
+ # Line is a point!
+ return -1
+ # Use the largest
+ if abs(sx - ex) > abs(sy - ey):
return (px - sx) / (ex - sx)
- if not math.isclose(sy, ey):
+ else:
return (py - sy) / (ey - sy)
- # Line is a point!
- return -1
def _both_points_are_on_same_side_of_origin(a, b, origin):
@@ -914,7 +916,7 @@ def lineLineIntersections(s1, e1, s2, e2):
>>> intersection.pt
(374.44882952482897, 313.73458370177315)
>>> (intersection.t1, intersection.t2)
- (0.45069111555824454, 0.5408153767394238)
+ (0.45069111555824465, 0.5408153767394238)
"""
s1x, s1y = s1
e1x, e1y = e1
@@ -1013,7 +1015,7 @@ def curveLineIntersections(curve, line):
>>> len(intersections)
3
>>> intersections[0].pt
- (84.90010344084885, 189.87306176459828)
+ (84.9000930760723, 189.87306176459828)
"""
if len(curve) == 3:
pointFinder = quadraticPointAtT
@@ -1024,7 +1026,11 @@ def curveLineIntersections(curve, line):
intersections = []
for t in _curve_line_intersections_t(curve, line):
pt = pointFinder(*curve, t)
- intersections.append(Intersection(pt=pt, t1=t, t2=_line_t_of_pt(*line, pt)))
+ # Back-project the point onto the line, to avoid problems with
+ # numerical accuracy in the case of vertical and horizontal lines
+ line_t = _line_t_of_pt(*line, pt)
+ pt = linePointAtT(*line, line_t)
+ intersections.append(Intersection(pt=pt, t1=t, t2=line_t))
return intersections
@@ -1169,7 +1175,7 @@ def segmentSegmentIntersections(seg1, seg2):
>>> len(intersections)
3
>>> intersections[0].pt
- (84.90010344084885, 189.87306176459828)
+ (84.9000930760723, 189.87306176459828)
"""
# Arrange by degree
diff --git a/Lib/fontTools/misc/eexec.py b/Lib/fontTools/misc/eexec.py
index 71f733c1..d1d4bb6a 100644
--- a/Lib/fontTools/misc/eexec.py
+++ b/Lib/fontTools/misc/eexec.py
@@ -12,7 +12,7 @@ the new key at the end of the operation.
"""
-from fontTools.misc.py23 import bytechr, bytesjoin, byteord
+from fontTools.misc.textTools import bytechr, bytesjoin, byteord
def _decryptChar(cipher, R):
diff --git a/Lib/fontTools/misc/etree.py b/Lib/fontTools/misc/etree.py
index 6e943e4b..cd4df365 100644
--- a/Lib/fontTools/misc/etree.py
+++ b/Lib/fontTools/misc/etree.py
@@ -11,7 +11,7 @@ or subclasses built-in ElementTree classes to add features that are
only availble in lxml, like OrderedDict for attributes, pretty_print and
iterwalk.
"""
-from fontTools.misc.py23 import unicode, tostr
+from fontTools.misc.textTools import tostr
XML_DECLARATION = """<?xml version='1.0' encoding='%s'?>"""
@@ -150,9 +150,7 @@ except ImportError:
)
return
- if encoding is unicode or (
- encoding is not None and encoding.lower() == "unicode"
- ):
+ if encoding is not None and encoding.lower() == "unicode":
if xml_declaration:
raise ValueError(
"Serialisation to unicode must not request an XML declaration"
diff --git a/Lib/fontTools/misc/fixedTools.py b/Lib/fontTools/misc/fixedTools.py
index f0474abf..6ec7d06e 100644
--- a/Lib/fontTools/misc/fixedTools.py
+++ b/Lib/fontTools/misc/fixedTools.py
@@ -17,7 +17,7 @@ functions for converting between fixed-point, float and string representations.
The maximum value that can still fit in an F2Dot14. (1.99993896484375)
"""
-from .roundTools import otRound
+from .roundTools import otRound, nearestMultipleShortestRepr
import logging
log = logging.getLogger(__name__)
@@ -125,6 +125,7 @@ def fixedToStr(value, precisionBits):
This is pretty slow compared to the simple division used in ``fixedToFloat``.
Use sporadically when you need to serialize or print the fixed-point number in
a human-readable form.
+ It uses nearestMultipleShortestRepr under the hood.
Args:
value (int): The fixed-point value to convert.
@@ -133,27 +134,8 @@ def fixedToStr(value, precisionBits):
Returns:
str: A string representation of the value.
"""
- if not value: return "0.0"
-
scale = 1 << precisionBits
- value /= scale
- eps = .5 / scale
- lo = value - eps
- hi = value + eps
- # If the range of valid choices spans an integer, return the integer.
- if int(lo) != int(hi):
- return str(float(round(value)))
- fmt = "%.8f"
- lo = fmt % lo
- hi = fmt % hi
- assert len(lo) == len(hi) and lo != hi
- for i in range(len(lo)):
- if lo[i] != hi[i]:
- break
- period = lo.find('.')
- assert period < i
- fmt = "%%.%df" % (i - period)
- return fmt % value
+ return nearestMultipleShortestRepr(value/scale, factor=1.0/scale)
def strToFixed(string, precisionBits):
@@ -168,9 +150,9 @@ def strToFixed(string, precisionBits):
Examples::
- >>> ## to convert a float string to a 2.14 fixed-point number:
- >>> strToFixed('-0.61884', precisionBits=14)
- -10139
+ >>> ## to convert a float string to a 2.14 fixed-point number:
+ >>> strToFixed('-0.61884', precisionBits=14)
+ -10139
"""
value = float(string)
return otRound(value * (1 << precisionBits))
@@ -214,7 +196,7 @@ def floatToFixedToStr(value, precisionBits):
This uses the shortest decimal representation (ie. the least
number of fractional decimal digits) to represent the equivalent
fixed-point number with ``precisionBits`` fractional binary digits.
- It uses fixedToStr under the hood.
+ It uses nearestMultipleShortestRepr under the hood.
>>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
'-0.61884'
@@ -227,8 +209,8 @@ def floatToFixedToStr(value, precisionBits):
str: A string representation of the value.
"""
- fixed = otRound(value * (1 << precisionBits))
- return fixedToStr(fixed, precisionBits)
+ scale = 1 << precisionBits
+ return nearestMultipleShortestRepr(value, factor=1.0/scale)
def ensureVersionIsLong(value):
diff --git a/Lib/fontTools/misc/intTools.py b/Lib/fontTools/misc/intTools.py
index 448e1627..6ba03e16 100644
--- a/Lib/fontTools/misc/intTools.py
+++ b/Lib/fontTools/misc/intTools.py
@@ -1,28 +1,25 @@
-__all__ = ['popCount']
+__all__ = ["popCount"]
-def popCount(v):
- """Return number of 1 bits (population count) of an integer.
+try:
+ bit_count = int.bit_count
+except AttributeError:
- If the integer is negative, the number of 1 bits in the
- twos-complement representation of the integer is returned. i.e.
- ``popCount(-30) == 28`` because -30 is::
+ def bit_count(v):
+ return bin(v).count("1")
- 1111 1111 1111 1111 1111 1111 1110 0010
- Uses the algorithm from `HAKMEM item 169 <https://www.inwap.com/pdp10/hbaker/hakmem/hacks.html#item169>`_.
+"""Return number of 1 bits (population count) of the absolute value of an integer.
- Args:
- v (int): Value to count.
+See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count
+"""
+popCount = bit_count
- Returns:
- Number of 1 bits in the binary representation of ``v``.
- """
- if v > 0xFFFFFFFF:
- return popCount(v >> 32) + popCount(v & 0xFFFFFFFF)
+def bit_indices(v):
+ """Return list of indices where bits are set, 0 being the index of the least significant bit.
- # HACKMEM 169
- y = (v >> 1) & 0xDB6DB6DB
- y = v - y - ((y >> 1) & 0xDB6DB6DB)
- return (((y + (y >> 3)) & 0xC71C71C7) % 0x3F)
+ >>> bit_indices(0b101)
+ [0, 2]
+ """
+ return [i for i, b in enumerate(bin(v)[::-1]) if b == "1"]
diff --git a/Lib/fontTools/misc/macCreatorType.py b/Lib/fontTools/misc/macCreatorType.py
index fb237200..6b191054 100644
--- a/Lib/fontTools/misc/macCreatorType.py
+++ b/Lib/fontTools/misc/macCreatorType.py
@@ -1,4 +1,4 @@
-from fontTools.misc.py23 import Tag, bytesjoin, strjoin
+from fontTools.misc.textTools import Tag, bytesjoin, strjoin
try:
import xattr
except ImportError:
@@ -18,7 +18,7 @@ def getMacCreatorAndType(path):
path (str): A file path.
Returns:
- A tuple of two :py:class:`fontTools.py23.Tag` objects, the first
+ A tuple of two :py:class:`fontTools.misc.textTools.Tag` objects, the first
representing the file creator and the second representing the
file type.
"""
diff --git a/Lib/fontTools/misc/macRes.py b/Lib/fontTools/misc/macRes.py
index 2c15b347..895ca1b8 100644
--- a/Lib/fontTools/misc/macRes.py
+++ b/Lib/fontTools/misc/macRes.py
@@ -1,7 +1,7 @@
-from fontTools.misc.py23 import bytesjoin, tostr
from io import BytesIO
import struct
from fontTools.misc import sstruct
+from fontTools.misc.textTools import bytesjoin, tostr
from collections import OrderedDict
from collections.abc import MutableMapping
diff --git a/Lib/fontTools/misc/plistlib/__init__.py b/Lib/fontTools/misc/plistlib/__init__.py
index 84dc4183..eb4b5259 100644
--- a/Lib/fontTools/misc/plistlib/__init__.py
+++ b/Lib/fontTools/misc/plistlib/__init__.py
@@ -23,7 +23,7 @@ from functools import singledispatch
from fontTools.misc import etree
-from fontTools.misc.py23 import tostr
+from fontTools.misc.textTools import tostr
# By default, we
@@ -151,7 +151,7 @@ PlistEncodable = Union[
Data,
datetime,
float,
- int,
+ Integral,
Mapping[str, Any],
Sequence[Any],
str,
diff --git a/Lib/fontTools/misc/psCharStrings.py b/Lib/fontTools/misc/psCharStrings.py
index cb675050..29c2d365 100644
--- a/Lib/fontTools/misc/psCharStrings.py
+++ b/Lib/fontTools/misc/psCharStrings.py
@@ -2,10 +2,10 @@
CFF dictionary data and Type1/Type2 CharStrings.
"""
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin, strjoin
from fontTools.misc.fixedTools import (
fixedToFloat, floatToFixed, floatToFixedToStr, strToFixedToFloat,
)
+from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin
from fontTools.pens.boundsPen import BoundsPen
import struct
import logging
diff --git a/Lib/fontTools/misc/psLib.py b/Lib/fontTools/misc/psLib.py
index 916755ce..a6c8b8b5 100644
--- a/Lib/fontTools/misc/psLib.py
+++ b/Lib/fontTools/misc/psLib.py
@@ -1,4 +1,4 @@
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin, tobytes, tostr
+from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr
from fontTools.misc import eexec
from .psOperators import (
PSOperators,
@@ -365,6 +365,7 @@ def suckfont(data, encoding="ascii"):
m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data)
if m:
fontName = m.group(1)
+ fontName = fontName.decode()
else:
fontName = None
interpreter = PSInterpreter(encoding=encoding)
diff --git a/Lib/fontTools/misc/py23.py b/Lib/fontTools/misc/py23.py
index 9096e2ef..29f634d6 100644
--- a/Lib/fontTools/misc/py23.py
+++ b/Lib/fontTools/misc/py23.py
@@ -8,6 +8,8 @@ from io import BytesIO
from io import StringIO as UnicodeIO
from types import SimpleNamespace
+from .textTools import Tag, bytechr, byteord, bytesjoin, strjoin, tobytes, tostr
+
warnings.warn(
"The py23 module has been deprecated and will be removed in a future release. "
"Please update your code.",
@@ -57,61 +59,7 @@ unichr = chr
unicode = str
zip = zip
-
-def bytechr(n):
- return bytes([n])
-
-
-def byteord(c):
- return c if isinstance(c, int) else ord(c)
-
-
-def strjoin(iterable, joiner=""):
- return tostr(joiner).join(iterable)
-
-
-def tobytes(s, encoding="ascii", errors="strict"):
- if not isinstance(s, bytes):
- return s.encode(encoding, errors)
- else:
- return s
-
-
-def tounicode(s, encoding="ascii", errors="strict"):
- if not isinstance(s, unicode):
- return s.decode(encoding, errors)
- else:
- return s
-
-
-tostr = tounicode
-
-
-class Tag(str):
- @staticmethod
- def transcode(blob):
- if isinstance(blob, bytes):
- blob = blob.decode("latin-1")
- return blob
-
- def __new__(self, content):
- return str.__new__(self, self.transcode(content))
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __eq__(self, other):
- return str.__eq__(self, self.transcode(other))
-
- def __hash__(self):
- return str.__hash__(self)
-
- def tobytes(self):
- return self.encode("latin-1")
-
-
-def bytesjoin(iterable, joiner=b""):
- return tobytes(joiner).join(tobytes(item) for item in iterable)
+tounicode = tostr
def xrange(*args, **kwargs):
diff --git a/Lib/fontTools/misc/roundTools.py b/Lib/fontTools/misc/roundTools.py
index c1d546f1..6f4aa634 100644
--- a/Lib/fontTools/misc/roundTools.py
+++ b/Lib/fontTools/misc/roundTools.py
@@ -56,3 +56,50 @@ def roundFunc(tolerance, round=otRound):
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
+
+
+def nearestMultipleShortestRepr(value: float, factor: float) -> str:
+ """Round to nearest multiple of factor and return shortest decimal representation.
+
+ This chooses the float that is closer to a multiple of the given factor while
+ having the shortest decimal representation (the least number of fractional decimal
+ digits).
+
+ For example, given the following:
+
+ >>> nearestMultipleShortestRepr(-0.61883544921875, 1.0/(1<<14))
+ '-0.61884'
+
+ Useful when you need to serialize or print a fixed-point number (or multiples
+ thereof, such as F2Dot14 fractions of 180 degrees in COLRv1 PaintRotate) in
+ a human-readable form.
+
+ Args:
+ value (float): The value to be rounded and serialized.
+ factor (float): The value which the result is a close multiple of.
+
+ Returns:
+ str: A compact string representation of the value.
+ """
+ if not value:
+ return "0.0"
+
+ value = otRound(value / factor) * factor
+ eps = .5 * factor
+ lo = value - eps
+ hi = value + eps
+ # If the range of valid choices spans an integer, return the integer.
+ if int(lo) != int(hi):
+ return str(float(round(value)))
+
+ fmt = "%.8f"
+ lo = fmt % lo
+ hi = fmt % hi
+ assert len(lo) == len(hi) and lo != hi
+ for i in range(len(lo)):
+ if lo[i] != hi[i]:
+ break
+ period = lo.find('.')
+ assert period < i
+ fmt = "%%.%df" % (i - period)
+ return fmt % value
diff --git a/Lib/fontTools/misc/sstruct.py b/Lib/fontTools/misc/sstruct.py
index ba1f8788..6db8b515 100644
--- a/Lib/fontTools/misc/sstruct.py
+++ b/Lib/fontTools/misc/sstruct.py
@@ -46,8 +46,8 @@ calcsize(fmt)
it returns the size of the data in bytes.
"""
-from fontTools.misc.py23 import tobytes, tostr
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
+from fontTools.misc.textTools import tobytes, tostr
import struct
import re
@@ -59,7 +59,7 @@ class Error(Exception):
pass
def pack(fmt, obj):
- formatstring, names, fixes = getformat(fmt)
+ formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
elements = []
if not isinstance(obj, dict):
obj = obj.__dict__
@@ -112,7 +112,8 @@ _elementRE = re.compile(
r"\s*" # whitespace
r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
r"\s*:\s*" # whitespace : whitespace
- r"([cbBhHiIlLqQfd]|[0-9]+[ps]|" # formatchar...
+ r"([xcbB?hHiIlLqQfd]|" # formatchar...
+ r"[0-9]+[ps]|" # ...formatchar...
r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
r"\s*" # whitespace
r"(#.*)?$" # [comment] + end of string
@@ -131,7 +132,7 @@ _fixedpointmappings = {
_formatcache = {}
-def getformat(fmt):
+def getformat(fmt, keep_pad_byte=False):
fmt = tostr(fmt, encoding="ascii")
try:
formatstring, names, fixes = _formatcache[fmt]
@@ -153,8 +154,9 @@ def getformat(fmt):
if not m:
raise Error("syntax error in fmt: '%s'" % line)
name = m.group(1)
- names.append(name)
formatchar = m.group(2)
+ if keep_pad_byte or formatchar != "x":
+ names.append(name)
if m.group(3):
# fixed point
before = int(m.group(3))
@@ -182,6 +184,8 @@ def _test():
astr: 5s
afloat: f; adouble: d # multiple "statements" are allowed
afixed: 16.16F
+ abool: ?
+ apad: x
"""
print('size:', calcsize(fmt))
@@ -199,6 +203,7 @@ def _test():
i.afloat = 0.5
i.adouble = 0.5
i.afixed = 1.5
+ i.abool = True
data = pack(fmt, i)
print('data:', repr(data))
diff --git a/Lib/fontTools/misc/testTools.py b/Lib/fontTools/misc/testTools.py
index 1b258e37..db316a82 100644
--- a/Lib/fontTools/misc/testTools.py
+++ b/Lib/fontTools/misc/testTools.py
@@ -7,7 +7,7 @@ import shutil
import sys
import tempfile
from unittest import TestCase as _TestCase
-from fontTools.misc.py23 import tobytes
+from fontTools.misc.textTools import tobytes
from fontTools.misc.xmlWriter import XMLWriter
@@ -38,6 +38,14 @@ def parseXML(xmlSnippet):
return reader.root[2]
+def parseXmlInto(font, parseInto, xmlSnippet):
+ parsed_xml = [e for e in parseXML(xmlSnippet.strip()) if not isinstance(e, str)]
+ for name, attrs, content in parsed_xml:
+ parseInto.fromXML(name, attrs, content, font)
+ parseInto.populateDefaults()
+ return parseInto
+
+
class FakeFont:
def __init__(self, glyphs):
self.glyphOrder_ = glyphs
@@ -57,11 +65,16 @@ class FakeFont:
def getGlyphID(self, name):
return self.reverseGlyphOrderDict_[name]
+ def getGlyphIDMany(self, lst):
+ return [self.getGlyphID(gid) for gid in lst]
+
def getGlyphName(self, glyphID):
if glyphID < len(self.glyphOrder_):
return self.glyphOrder_[glyphID]
else:
return "glyph%.5d" % glyphID
+ def getGlyphNameMany(self, lst):
+ return [self.getGlyphName(gid) for gid in lst]
def getGlyphOrder(self):
return self.glyphOrder_
@@ -136,7 +149,7 @@ class MockFont(object):
self._reverseGlyphOrder = AllocatingDict({'.notdef': 0})
self.lazy = False
- def getGlyphID(self, glyph, requireReal=None):
+ def getGlyphID(self, glyph):
gid = self._reverseGlyphOrder[glyph]
return gid
diff --git a/Lib/fontTools/misc/textTools.py b/Lib/fontTools/misc/textTools.py
index 072976af..bf75bcbd 100644
--- a/Lib/fontTools/misc/textTools.py
+++ b/Lib/fontTools/misc/textTools.py
@@ -1,7 +1,6 @@
"""fontTools.misc.textTools.py -- miscellaneous routines."""
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin, strjoin, tobytes
import ast
import string
@@ -10,6 +9,29 @@ import string
safeEval = ast.literal_eval
+class Tag(str):
+ @staticmethod
+ def transcode(blob):
+ if isinstance(blob, bytes):
+ blob = blob.decode("latin-1")
+ return blob
+
+ def __new__(self, content):
+ return str.__new__(self, self.transcode(content))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ return str.__eq__(self, self.transcode(other))
+
+ def __hash__(self):
+ return str.__hash__(self)
+
+ def tobytes(self):
+ return self.encode("latin-1")
+
+
def readHex(content):
"""Convert a list of hex strings to binary data."""
return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))
@@ -97,6 +119,36 @@ def pad(data, size):
return data
+def tostr(s, encoding="ascii", errors="strict"):
+ if not isinstance(s, str):
+ return s.decode(encoding, errors)
+ else:
+ return s
+
+
+def tobytes(s, encoding="ascii", errors="strict"):
+ if isinstance(s, str):
+ return s.encode(encoding, errors)
+ else:
+ return bytes(s)
+
+
+def bytechr(n):
+ return bytes([n])
+
+
+def byteord(c):
+ return c if isinstance(c, int) else ord(c)
+
+
+def strjoin(iterable, joiner=""):
+ return tostr(joiner).join(iterable)
+
+
+def bytesjoin(iterable, joiner=b""):
+ return tobytes(joiner).join(tobytes(item) for item in iterable)
+
+
if __name__ == "__main__":
import doctest, sys
sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/misc/transform.py b/Lib/fontTools/misc/transform.py
index 997598f5..94e1f622 100644
--- a/Lib/fontTools/misc/transform.py
+++ b/Lib/fontTools/misc/transform.py
@@ -10,12 +10,16 @@ used as dictionary keys.
This module exports the following symbols:
- Transform -- this is the main class
- Identity -- Transform instance set to the identity transformation
- Offset -- Convenience function that returns a translating transformation
- Scale -- Convenience function that returns a scaling transformation
+Transform
+ this is the main class
+Identity
+ Transform instance set to the identity transformation
+Offset
+ Convenience function that returns a translating transformation
+Scale
+ Convenience function that returns a scaling transformation
-Examples:
+:Example:
>>> t = Transform(2, 0, 0, 3, 0, 0)
>>> t.transformPoint((100, 100))
@@ -72,7 +76,8 @@ class Transform(NamedTuple):
Transform instances are immutable: all transforming methods, eg.
rotate(), return a new Transform instance.
- Examples:
+ :Example:
+
>>> t = Transform()
>>> t
<Transform [1 0 0 1 0 0]>
@@ -85,7 +90,8 @@ class Transform(NamedTuple):
(200, 300)
Transform's constructor takes six arguments, all of which are
- optional, and can be used as keyword arguments:
+ optional, and can be used as keyword arguments::
+
>>> Transform(12)
<Transform [12 0 0 1 0 0]>
>>> Transform(dx=12)
@@ -93,7 +99,8 @@ class Transform(NamedTuple):
>>> Transform(yx=12)
<Transform [1 0 12 1 0 0]>
- Transform instances also behave like sequences of length 6:
+ Transform instances also behave like sequences of length 6::
+
>>> len(Identity)
6
>>> list(Identity)
@@ -101,13 +108,15 @@ class Transform(NamedTuple):
>>> tuple(Identity)
(1, 0, 0, 1, 0, 0)
- Transform instances are comparable:
+ Transform instances are comparable::
+
>>> t1 = Identity.scale(2, 3).translate(4, 6)
>>> t2 = Identity.translate(8, 18).scale(2, 3)
>>> t1 == t2
1
- But beware of floating point rounding errors:
+ But beware of floating point rounding errors::
+
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1
@@ -118,12 +127,14 @@ class Transform(NamedTuple):
0
Transform instances are hashable, meaning you can use them as
- keys in dictionaries:
+ keys in dictionaries::
+
>>> d = {Scale(12, 13): None}
>>> d
{<Transform [12 0 0 13 0 0]>: None}
- But again, beware of floating point rounding errors:
+ But again, beware of floating point rounding errors::
+
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1
@@ -149,7 +160,8 @@ class Transform(NamedTuple):
def transformPoint(self, p):
"""Transform a point.
- Example:
+ :Example:
+
>>> t = Transform()
>>> t = t.scale(2.5, 5.5)
>>> t.transformPoint((100, 100))
@@ -162,7 +174,8 @@ class Transform(NamedTuple):
def transformPoints(self, points):
"""Transform a list of points.
- Example:
+ :Example:
+
>>> t = Scale(2, 3)
>>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
[(0, 0), (0, 300), (200, 300), (200, 0)]
@@ -171,10 +184,36 @@ class Transform(NamedTuple):
xx, xy, yx, yy, dx, dy = self
return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points]
+ def transformVector(self, v):
+ """Transform a (dx, dy) vector, treating translation as zero.
+
+ :Example:
+
+ >>> t = Transform(2, 0, 0, 2, 10, 20)
+ >>> t.transformVector((3, -4))
+ (6, -8)
+ >>>
+ """
+ (dx, dy) = v
+ xx, xy, yx, yy = self[:4]
+ return (xx*dx + yx*dy, xy*dx + yy*dy)
+
+ def transformVectors(self, vectors):
+ """Transform a list of (dx, dy) vectors, treating translation as zero.
+
+ :Example:
+ >>> t = Transform(2, 0, 0, 2, 10, 20)
+ >>> t.transformVectors([(3, -4), (5, -6)])
+ [(6, -8), (10, -12)]
+ >>>
+ """
+ xx, xy, yx, yy = self[:4]
+ return [(xx*dx + yx*dy, xy*dx + yy*dy) for dx, dy in vectors]
+
def translate(self, x=0, y=0):
"""Return a new transformation, translated (offset) by x, y.
- Example:
+ :Example:
>>> t = Transform()
>>> t.translate(20, 30)
<Transform [1 0 0 1 20 30]>
@@ -186,7 +225,7 @@ class Transform(NamedTuple):
"""Return a new transformation, scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well.
- Example:
+ :Example:
>>> t = Transform()
>>> t.scale(5)
<Transform [5 0 0 5 0 0]>
@@ -201,7 +240,7 @@ class Transform(NamedTuple):
def rotate(self, angle):
"""Return a new transformation, rotated by 'angle' (radians).
- Example:
+ :Example:
>>> import math
>>> t = Transform()
>>> t.rotate(math.pi / 2)
@@ -216,7 +255,7 @@ class Transform(NamedTuple):
def skew(self, x=0, y=0):
"""Return a new transformation, skewed by x and y.
- Example:
+ :Example:
>>> import math
>>> t = Transform()
>>> t.skew(math.pi / 4)
@@ -230,7 +269,7 @@ class Transform(NamedTuple):
"""Return a new transformation, transformed by another
transformation.
- Example:
+ :Example:
>>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.transform((4, 3, 2, 1, 5, 6))
<Transform [8 9 4 3 11 24]>
@@ -251,7 +290,7 @@ class Transform(NamedTuple):
transformed by self. self.reverseTransform(other) is equivalent to
other.transform(self).
- Example:
+ :Example:
>>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.reverseTransform((4, 3, 2, 1, 5, 6))
<Transform [8 6 6 3 21 15]>
@@ -272,7 +311,7 @@ class Transform(NamedTuple):
def inverse(self):
"""Return the inverse transformation.
- Example:
+ :Example:
>>> t = Identity.translate(2, 3).scale(4, 5)
>>> t.transformPoint((10, 20))
(42, 103)
@@ -290,7 +329,10 @@ class Transform(NamedTuple):
return self.__class__(xx, xy, yx, yy, dx, dy)
def toPS(self):
- """Return a PostScript representation:
+ """Return a PostScript representation
+
+ :Example:
+
>>> t = Identity.scale(2, 3).translate(4, 5)
>>> t.toPS()
'[2 0 0 3 8 15]'
@@ -300,6 +342,9 @@ class Transform(NamedTuple):
def __bool__(self):
"""Returns True if transform is not identity, False otherwise.
+
+ :Example:
+
>>> bool(Identity)
False
>>> bool(Transform())
@@ -326,7 +371,7 @@ Identity = Transform()
def Offset(x=0, y=0):
"""Return the identity transformation offset by x, y.
- Example:
+ :Example:
>>> Offset(2, 3)
<Transform [1 0 0 1 2 3]>
>>>
@@ -337,7 +382,7 @@ def Scale(x, y=None):
"""Return the identity transformation scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well.
- Example:
+ :Example:
>>> Scale(2, 3)
<Transform [2 0 0 3 0 0]>
>>>
diff --git a/Lib/fontTools/misc/xmlReader.py b/Lib/fontTools/misc/xmlReader.py
index b2707e99..6ec50de4 100644
--- a/Lib/fontTools/misc/xmlReader.py
+++ b/Lib/fontTools/misc/xmlReader.py
@@ -93,11 +93,12 @@ class XMLReader(object):
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
- sfntVersion = attrs.get("sfntVersion")
- if sfntVersion is not None:
- if len(sfntVersion) != 4:
- sfntVersion = safeEval('"' + sfntVersion + '"')
- self.ttFont.sfntVersion = sfntVersion
+ if self.ttFont.reader is None and not self.ttFont.tables:
+ sfntVersion = attrs.get("sfntVersion")
+ if sfntVersion is not None:
+ if len(sfntVersion) != 4:
+ sfntVersion = safeEval('"' + sfntVersion + '"')
+ self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
if subFile is not None:
diff --git a/Lib/fontTools/misc/xmlWriter.py b/Lib/fontTools/misc/xmlWriter.py
index fec127a9..9e30fa33 100644
--- a/Lib/fontTools/misc/xmlWriter.py
+++ b/Lib/fontTools/misc/xmlWriter.py
@@ -1,6 +1,6 @@
"""xmlWriter.py -- Simple XML authoring class"""
-from fontTools.misc.py23 import byteord, strjoin, tobytes, tostr
+from fontTools.misc.textTools import byteord, strjoin, tobytes, tostr
import sys
import os
import string
@@ -11,7 +11,7 @@ INDENT = " "
class XMLWriter(object):
def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8",
- newlinestr=None):
+ newlinestr="\n"):
if encoding.lower().replace('-','').replace('_','') != 'utf8':
raise Exception('Only UTF-8 encoding is supported.')
if fileOrPath == '-':
diff --git a/Lib/fontTools/otlLib/builder.py b/Lib/fontTools/otlLib/builder.py
index 182f7da6..e3f33551 100644
--- a/Lib/fontTools/otlLib/builder.py
+++ b/Lib/fontTools/otlLib/builder.py
@@ -1,4 +1,5 @@
from collections import namedtuple, OrderedDict
+import os
from fontTools.misc.fixedTools import fixedToFloat
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
@@ -10,6 +11,11 @@ from fontTools.ttLib.tables.otBase import (
)
from fontTools.ttLib.tables import otBase
from fontTools.feaLib.ast import STATNameStatement
+from fontTools.otlLib.optimize.gpos import (
+ GPOS_COMPACT_MODE_DEFAULT,
+ GPOS_COMPACT_MODE_ENV_KEY,
+ compact_lookup,
+)
from fontTools.otlLib.error import OpenTypeLibError
from functools import reduce
import logging
@@ -22,7 +28,7 @@ log = logging.getLogger(__name__)
def buildCoverage(glyphs, glyphMap):
"""Builds a coverage table.
- Coverage tables (as defined in the `OpenType spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#coverage-table>`_)
+ Coverage tables (as defined in the `OpenType spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#coverage-table>`__)
are used in all OpenType Layout lookups apart from the Extension type, and
define the glyphs involved in a layout subtable. This allows shaping engines
to compare the glyph stream with the coverage table and quickly determine
@@ -50,7 +56,7 @@ def buildCoverage(glyphs, glyphMap):
if not glyphs:
return None
self = ot.Coverage()
- self.glyphs = sorted(glyphs, key=glyphMap.__getitem__)
+ self.glyphs = sorted(set(glyphs), key=glyphMap.__getitem__)
return self
@@ -64,7 +70,7 @@ LOOKUP_FLAG_USE_MARK_FILTERING_SET = 0x0010
def buildLookup(subtables, flags=0, markFilterSet=None):
"""Turns a collection of rules into a lookup.
- A Lookup (as defined in the `OpenType Spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#lookupTbl>`_)
+ A Lookup (as defined in the `OpenType Spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#lookupTbl>`__)
wraps the individual rules in a layout operation (substitution or
positioning) in a data structure expressing their overall lookup type -
for example, single substitution, mark-to-base attachment, and so on -
@@ -386,7 +392,21 @@ class ChainContextualBuilder(LookupBuilder):
if not ruleset.hasAnyGlyphClasses:
candidates[1] = [self.buildFormat1Subtable(ruleset, chaining)]
+ for i in [1, 2, 3]:
+ if candidates[i]:
+ try:
+ self.getCompiledSize_(candidates[i])
+ except Exception as e:
+ log.warning(
+ "Contextual format %i at %s overflowed (%s)"
+ % (i, str(self.location), e)
+ )
+ candidates[i] = None
+
candidates = [x for x in candidates if x is not None]
+ if not candidates:
+ raise OpenTypeLibError("All candidates overflowed", self.location)
+
winner = min(candidates, key=self.getCompiledSize_)
subtables.extend(winner)
@@ -943,12 +963,22 @@ class MarkBasePosBuilder(LookupBuilder):
positioning lookup.
"""
markClasses = self.buildMarkClasses_(self.marks)
- marks = {
- mark: (markClasses[mc], anchor) for mark, (mc, anchor) in self.marks.items()
- }
+ marks = {}
+ for mark, (mc, anchor) in self.marks.items():
+ if mc not in markClasses:
+ raise ValueError(
+ "Mark class %s not found for mark glyph %s" % (mc, mark)
+ )
+ marks[mark] = (markClasses[mc], anchor)
bases = {}
for glyph, anchors in self.bases.items():
- bases[glyph] = {markClasses[mc]: anchor for (mc, anchor) in anchors.items()}
+ bases[glyph] = {}
+ for mc, anchor in anchors.items():
+ if mc not in markClasses:
+ raise ValueError(
+ "Mark class %s not found for base glyph %s" % (mc, mark)
+ )
+ bases[glyph][markClasses[mc]] = anchor
subtables = buildMarkBasePos(marks, bases, self.glyphMap)
return self.buildLookup_(subtables)
@@ -1373,7 +1403,17 @@ class PairPosBuilder(LookupBuilder):
subtables.extend(buildPairPosGlyphs(self.glyphPairs, self.glyphMap))
for key in sorted(builders.keys()):
subtables.extend(builders[key].subtables())
- return self.buildLookup_(subtables)
+ lookup = self.buildLookup_(subtables)
+
+ # Compact the lookup
+ # This is a good moment to do it because the compaction should create
+ # smaller subtables, which may prevent overflows from happening.
+ mode = os.environ.get(GPOS_COMPACT_MODE_ENV_KEY, GPOS_COMPACT_MODE_DEFAULT)
+ if mode and mode != "0":
+ log.info("Compacting GPOS...")
+ compact_lookup(self.font, mode, lookup)
+
+ return lookup
class SinglePosBuilder(LookupBuilder):
@@ -2091,8 +2131,16 @@ def buildPairPosClassesSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2
for c2 in classes2:
rec2 = ot.Class2Record()
val1, val2 = pairs.get((c1, c2), (None, None))
- rec2.Value1 = ValueRecord(src=val1, valueFormat=valueFormat1) if valueFormat1 else None
- rec2.Value2 = ValueRecord(src=val2, valueFormat=valueFormat2) if valueFormat2 else None
+ rec2.Value1 = (
+ ValueRecord(src=val1, valueFormat=valueFormat1)
+ if valueFormat1
+ else None
+ )
+ rec2.Value2 = (
+ ValueRecord(src=val2, valueFormat=valueFormat2)
+ if valueFormat2
+ else None
+ )
rec1.Class2Record.append(rec2)
self.Class1Count = len(self.Class1Record)
self.Class2Count = len(classes2)
@@ -2191,8 +2239,16 @@ def buildPairPosGlyphsSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=
for glyph2, val1, val2 in sorted(p[glyph], key=lambda x: glyphMap[x[0]]):
pvr = ot.PairValueRecord()
pvr.SecondGlyph = glyph2
- pvr.Value1 = ValueRecord(src=val1, valueFormat=valueFormat1) if valueFormat1 else None
- pvr.Value2 = ValueRecord(src=val2, valueFormat=valueFormat2) if valueFormat2 else None
+ pvr.Value1 = (
+ ValueRecord(src=val1, valueFormat=valueFormat1)
+ if valueFormat1
+ else None
+ )
+ pvr.Value2 = (
+ ValueRecord(src=val2, valueFormat=valueFormat2)
+ if valueFormat2
+ else None
+ )
ps.PairValueRecord.append(pvr)
ps.PairValueCount = len(ps.PairValueRecord)
self.PairSetCount = len(self.PairSet)
@@ -2313,8 +2369,13 @@ def buildSinglePosSubtable(values, glyphMap):
"""
self = ot.SinglePos()
self.Coverage = buildCoverage(values.keys(), glyphMap)
- valueFormat = self.ValueFormat = reduce(int.__or__, [v.getFormat() for v in values.values()], 0)
- valueRecords = [ValueRecord(src=values[g], valueFormat=valueFormat) for g in self.Coverage.glyphs]
+ valueFormat = self.ValueFormat = reduce(
+ int.__or__, [v.getFormat() for v in values.values()], 0
+ )
+ valueRecords = [
+ ValueRecord(src=values[g], valueFormat=valueFormat)
+ for g in self.Coverage.glyphs
+ ]
if all(v == valueRecords[0] for v in valueRecords):
self.Format = 1
if self.ValueFormat != 0:
@@ -2617,7 +2678,9 @@ AXIS_VALUE_NEGATIVE_INFINITY = fixedToFloat(-0x80000000, 16)
AXIS_VALUE_POSITIVE_INFINITY = fixedToFloat(0x7FFFFFFF, 16)
-def buildStatTable(ttFont, axes, locations=None, elidedFallbackName=2):
+def buildStatTable(
+ ttFont, axes, locations=None, elidedFallbackName=2, windowsNames=True, macNames=True
+):
"""Add a 'STAT' table to 'ttFont'.
'axes' is a list of dictionaries describing axes and their
@@ -2702,17 +2765,23 @@ def buildStatTable(ttFont, axes, locations=None, elidedFallbackName=2):
ttFont["STAT"] = ttLib.newTable("STAT")
statTable = ttFont["STAT"].table = ot.STAT()
nameTable = ttFont["name"]
- statTable.ElidedFallbackNameID = _addName(nameTable, elidedFallbackName)
+ statTable.ElidedFallbackNameID = _addName(
+ nameTable, elidedFallbackName, windows=windowsNames, mac=macNames
+ )
# 'locations' contains data for AxisValue Format 4
- axisRecords, axisValues = _buildAxisRecords(axes, nameTable)
+ axisRecords, axisValues = _buildAxisRecords(
+ axes, nameTable, windowsNames=windowsNames, macNames=macNames
+ )
if not locations:
statTable.Version = 0x00010001
else:
# We'll be adding Format 4 AxisValue records, which
# requires a higher table version
statTable.Version = 0x00010002
- multiAxisValues = _buildAxisValuesFormat4(locations, axes, nameTable)
+ multiAxisValues = _buildAxisValuesFormat4(
+ locations, axes, nameTable, windowsNames=windowsNames, macNames=macNames
+ )
axisValues = multiAxisValues + axisValues
# Store AxisRecords
@@ -2731,13 +2800,15 @@ def buildStatTable(ttFont, axes, locations=None, elidedFallbackName=2):
statTable.AxisValueCount = len(axisValues)
-def _buildAxisRecords(axes, nameTable):
+def _buildAxisRecords(axes, nameTable, windowsNames=True, macNames=True):
axisRecords = []
axisValues = []
for axisRecordIndex, axisDict in enumerate(axes):
axis = ot.AxisRecord()
axis.AxisTag = axisDict["tag"]
- axis.AxisNameID = _addName(nameTable, axisDict["name"], 256)
+ axis.AxisNameID = _addName(
+ nameTable, axisDict["name"], 256, windows=windowsNames, mac=macNames
+ )
axis.AxisOrdering = axisDict.get("ordering", axisRecordIndex)
axisRecords.append(axis)
@@ -2745,7 +2816,9 @@ def _buildAxisRecords(axes, nameTable):
axisValRec = ot.AxisValue()
axisValRec.AxisIndex = axisRecordIndex
axisValRec.Flags = axisVal.get("flags", 0)
- axisValRec.ValueNameID = _addName(nameTable, axisVal["name"])
+ axisValRec.ValueNameID = _addName(
+ nameTable, axisVal["name"], windows=windowsNames, mac=macNames
+ )
if "value" in axisVal:
axisValRec.Value = axisVal["value"]
@@ -2770,7 +2843,9 @@ def _buildAxisRecords(axes, nameTable):
return axisRecords, axisValues
-def _buildAxisValuesFormat4(locations, axes, nameTable):
+def _buildAxisValuesFormat4(
+ locations, axes, nameTable, windowsNames=True, macNames=True
+):
axisTagToIndex = {}
for axisRecordIndex, axisDict in enumerate(axes):
axisTagToIndex[axisDict["tag"]] = axisRecordIndex
@@ -2779,7 +2854,9 @@ def _buildAxisValuesFormat4(locations, axes, nameTable):
for axisLocationDict in locations:
axisValRec = ot.AxisValue()
axisValRec.Format = 4
- axisValRec.ValueNameID = _addName(nameTable, axisLocationDict["name"])
+ axisValRec.ValueNameID = _addName(
+ nameTable, axisLocationDict["name"], windows=windowsNames, mac=macNames
+ )
axisValRec.Flags = axisLocationDict.get("flags", 0)
axisValueRecords = []
for tag, value in axisLocationDict["location"].items():
@@ -2794,7 +2871,7 @@ def _buildAxisValuesFormat4(locations, axes, nameTable):
return axisValues
-def _addName(nameTable, value, minNameID=0):
+def _addName(nameTable, value, minNameID=0, windows=True, mac=True):
if isinstance(value, int):
# Already a nameID
return value
@@ -2818,4 +2895,6 @@ def _addName(nameTable, value, minNameID=0):
return nameID
else:
raise TypeError("value must be int, str, dict or list")
- return nameTable.addMultilingualName(names, minNameID=minNameID)
+ return nameTable.addMultilingualName(
+ names, windows=windows, mac=mac, minNameID=minNameID
+ )
diff --git a/Lib/fontTools/otlLib/optimize/__init__.py b/Lib/fontTools/otlLib/optimize/__init__.py
new file mode 100644
index 00000000..5c007e89
--- /dev/null
+++ b/Lib/fontTools/otlLib/optimize/__init__.py
@@ -0,0 +1,68 @@
+from argparse import RawTextHelpFormatter
+from textwrap import dedent
+
+from fontTools.ttLib import TTFont
+from fontTools.otlLib.optimize.gpos import compact, GPOS_COMPACT_MODE_DEFAULT
+
+def main(args=None):
+ """Optimize the layout tables of an existing font."""
+ from argparse import ArgumentParser
+ from fontTools import configLogger
+
+ parser = ArgumentParser(prog="otlLib.optimize", description=main.__doc__, formatter_class=RawTextHelpFormatter)
+ parser.add_argument("font")
+ parser.add_argument(
+ "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file"
+ )
+ parser.add_argument(
+ "--gpos-compact-mode",
+ help=dedent(
+ f"""\
+ GPOS Lookup type 2 (PairPos) compaction mode:
+ 0 = do not attempt to compact PairPos lookups;
+ 1 to 8 = create at most 1 to 8 new subtables for each existing
+ subtable, provided that it would yield a 50%% file size saving;
+ 9 = create as many new subtables as needed to yield a file size saving.
+ Default: {GPOS_COMPACT_MODE_DEFAULT}.
+
+ This compaction aims to save file size, by splitting large class
+ kerning subtables (Format 2) that contain many zero values into
+ smaller and denser subtables. It's a trade-off between the overhead
+ of several subtables versus the sparseness of one big subtable.
+
+ See the pull request: https://github.com/fonttools/fonttools/pull/2326
+ """
+ ),
+ default=int(GPOS_COMPACT_MODE_DEFAULT),
+ choices=list(range(10)),
+ type=int,
+ )
+ logging_group = parser.add_mutually_exclusive_group(required=False)
+ logging_group.add_argument(
+ "-v", "--verbose", action="store_true", help="Run more verbosely."
+ )
+ logging_group.add_argument(
+ "-q", "--quiet", action="store_true", help="Turn verbosity off."
+ )
+ options = parser.parse_args(args)
+
+ configLogger(
+ level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
+ )
+
+ font = TTFont(options.font)
+ # TODO: switch everything to have type(mode) = int when using the Config class
+ compact(font, str(options.gpos_compact_mode))
+ font.save(options.outfile or options.font)
+
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) > 1:
+ sys.exit(main())
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
+
diff --git a/Lib/fontTools/otlLib/optimize/__main__.py b/Lib/fontTools/otlLib/optimize/__main__.py
new file mode 100644
index 00000000..03027ecd
--- /dev/null
+++ b/Lib/fontTools/otlLib/optimize/__main__.py
@@ -0,0 +1,6 @@
+import sys
+from fontTools.otlLib.optimize import main
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/Lib/fontTools/otlLib/optimize/gpos.py b/Lib/fontTools/otlLib/optimize/gpos.py
new file mode 100644
index 00000000..79873fad
--- /dev/null
+++ b/Lib/fontTools/otlLib/optimize/gpos.py
@@ -0,0 +1,439 @@
+import logging
+from collections import defaultdict, namedtuple
+from functools import reduce
+from itertools import chain
+from math import log2
+from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple
+
+from fontTools.misc.intTools import bit_count, bit_indices
+from fontTools.ttLib import TTFont
+from fontTools.ttLib.tables import otBase, otTables
+
+# NOTE: activating this optimization via the environment variable is
+# experimental and may not be supported once an alternative mechanism
+# is in place. See: https://github.com/fonttools/fonttools/issues/2349
+GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE"
+GPOS_COMPACT_MODE_DEFAULT = "0"
+
+log = logging.getLogger("fontTools.otlLib.optimize.gpos")
+
+
+def compact(font: TTFont, mode: str) -> TTFont:
+ # Ideal plan:
+ # 1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable
+ # https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable
+ # 2. Extract glyph-glyph kerning and class-kerning from all present subtables
+ # 3. Regroup into different subtable arrangements
+ # 4. Put back into the lookup
+ #
+ # Actual implementation:
+ # 2. Only class kerning is optimized currently
+ # 3. If the input kerning is already in several subtables, the subtables
+ # are not grouped together first; instead each subtable is treated
+ # independently, so currently this step is:
+ # Split existing subtables into more smaller subtables
+ gpos = font["GPOS"]
+ for lookup in gpos.table.LookupList.Lookup:
+ if lookup.LookupType == 2:
+ compact_lookup(font, mode, lookup)
+ elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2:
+ compact_ext_lookup(font, mode, lookup)
+ return font
+
+
+def compact_lookup(font: TTFont, mode: str, lookup: otTables.Lookup) -> None:
+ new_subtables = compact_pair_pos(font, mode, lookup.SubTable)
+ lookup.SubTable = new_subtables
+ lookup.SubTableCount = len(new_subtables)
+
+
+def compact_ext_lookup(font: TTFont, mode: str, lookup: otTables.Lookup) -> None:
+ new_subtables = compact_pair_pos(
+ font, mode, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable]
+ )
+ new_ext_subtables = []
+ for subtable in new_subtables:
+ ext_subtable = otTables.ExtensionPos()
+ ext_subtable.Format = 1
+ ext_subtable.ExtSubTable = subtable
+ new_ext_subtables.append(ext_subtable)
+ lookup.SubTable = new_ext_subtables
+ lookup.SubTableCount = len(new_ext_subtables)
+
+
+def compact_pair_pos(
+ font: TTFont, mode: str, subtables: Sequence[otTables.PairPos]
+) -> Sequence[otTables.PairPos]:
+ new_subtables = []
+ for subtable in subtables:
+ if subtable.Format == 1:
+ # Not doing anything to Format 1 (yet?)
+ new_subtables.append(subtable)
+ elif subtable.Format == 2:
+ new_subtables.extend(compact_class_pairs(font, mode, subtable))
+ return new_subtables
+
+
+def compact_class_pairs(
+ font: TTFont, mode: str, subtable: otTables.PairPos
+) -> List[otTables.PairPos]:
+ from fontTools.otlLib.builder import buildPairPosClassesSubtable
+
+ subtables = []
+ classes1: DefaultDict[int, List[str]] = defaultdict(list)
+ for g in subtable.Coverage.glyphs:
+ classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g)
+ classes2: DefaultDict[int, List[str]] = defaultdict(list)
+ for g, i in subtable.ClassDef2.classDefs.items():
+ classes2[i].append(g)
+ all_pairs = {}
+ for i, class1 in enumerate(subtable.Class1Record):
+ for j, class2 in enumerate(class1.Class2Record):
+ if is_really_zero(class2):
+ continue
+ all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = (
+ getattr(class2, "Value1", None),
+ getattr(class2, "Value2", None),
+ )
+
+ if len(mode) == 1 and mode in "123456789":
+ grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost(
+ font, all_pairs, int(mode)
+ )
+ for pairs in grouped_pairs:
+ subtables.append(
+ buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap())
+ )
+ else:
+ raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={mode}")
+ return subtables
+
+
+def is_really_zero(class2: otTables.Class2Record) -> bool:
+ v1 = getattr(class2, "Value1", None)
+ v2 = getattr(class2, "Value2", None)
+ return (v1 is None or v1.getEffectiveFormat() == 0) and (
+ v2 is None or v2.getEffectiveFormat() == 0
+ )
+
+
+Pairs = Dict[
+ Tuple[Tuple[str, ...], Tuple[str, ...]],
+ Tuple[otBase.ValueRecord, otBase.ValueRecord],
+]
+
+# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958
+def _getClassRanges(glyphIDs: Iterable[int]):
+ glyphIDs = sorted(glyphIDs)
+ last = glyphIDs[0]
+ ranges = [[last]]
+ for glyphID in glyphIDs[1:]:
+ if glyphID != last + 1:
+ ranges[-1].append(last)
+ ranges.append([glyphID])
+ last = glyphID
+ ranges[-1].append(last)
+ return ranges, glyphIDs[0], glyphIDs[-1]
+
+
+# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989
+def _classDef_bytes(
+ class_data: List[Tuple[List[Tuple[int, int]], int, int]],
+ class_ids: List[int],
+ coverage=False,
+):
+ if not class_ids:
+ return 0
+ first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]]
+ range_count = len(first_ranges)
+ for i in class_ids[1:]:
+ data = class_data[i]
+ range_count += len(data[0])
+ min_glyph_id = min(min_glyph_id, data[1])
+ max_glyph_id = max(max_glyph_id, data[2])
+ glyphCount = max_glyph_id - min_glyph_id + 1
+ # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1
+ format1_bytes = 6 + glyphCount * 2
+ # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2
+ format2_bytes = 4 + range_count * 6
+ return min(format1_bytes, format2_bytes)
+
+
+ClusteringContext = namedtuple(
+ "ClusteringContext",
+ [
+ "lines",
+ "all_class1",
+ "all_class1_data",
+ "all_class2_data",
+ "valueFormat1_bytes",
+ "valueFormat2_bytes",
+ ],
+)
+
+
+class Cluster:
+ # TODO(Python 3.7): Turn this into a dataclass
+ # ctx: ClusteringContext
+ # indices: int
+ # Caches
+ # TODO(Python 3.8): use functools.cached_property instead of the
+ # manually cached properties, and remove the cache fields listed below.
+ # _indices: Optional[List[int]] = None
+ # _column_indices: Optional[List[int]] = None
+ # _cost: Optional[int] = None
+
+ __slots__ = "ctx", "indices_bitmask", "_indices", "_column_indices", "_cost"
+
+ def __init__(self, ctx: ClusteringContext, indices_bitmask: int):
+ self.ctx = ctx
+ self.indices_bitmask = indices_bitmask
+ self._indices = None
+ self._column_indices = None
+ self._cost = None
+
+ @property
+ def indices(self):
+ if self._indices is None:
+ self._indices = bit_indices(self.indices_bitmask)
+ return self._indices
+
+ @property
+ def column_indices(self):
+ if self._column_indices is None:
+ # Indices of columns that have a 1 in at least 1 line
+ # => binary OR all the lines
+ bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices))
+ self._column_indices = bit_indices(bitmask)
+ return self._column_indices
+
+ @property
+ def width(self):
+ # Add 1 because Class2=0 cannot be used but needs to be encoded.
+ return len(self.column_indices) + 1
+
+ @property
+ def cost(self):
+ if self._cost is None:
+ self._cost = (
+ # 2 bytes to store the offset to this subtable in the Lookup table above
+ 2
+ # Contents of the subtable
+ # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment
+ # uint16 posFormat Format identifier: format = 2
+ + 2
+ # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable.
+ + 2
+ + self.coverage_bytes
+ # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero).
+ + 2
+ # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero).
+ + 2
+ # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair.
+ + 2
+ + self.classDef1_bytes
+ # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair.
+ + 2
+ + self.classDef2_bytes
+ # uint16 class1Count Number of classes in classDef1 table — includes Class 0.
+ + 2
+ # uint16 class2Count Number of classes in classDef2 table — includes Class 0.
+ + 2
+ # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1.
+ + (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes)
+ * len(self.indices)
+ * self.width
+ )
+ return self._cost
+
+ @property
+ def coverage_bytes(self):
+ format1_bytes = (
+ # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1
+ # uint16 coverageFormat Format identifier — format = 1
+ # uint16 glyphCount Number of glyphs in the glyph array
+ 4
+ # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order
+ + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2
+ )
+ ranges = sorted(
+ chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices)
+ )
+ merged_range_count = 0
+ last = None
+ for (start, end) in ranges:
+ if last is not None and start != last + 1:
+ merged_range_count += 1
+ last = end
+ format2_bytes = (
+ # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2
+ # uint16 coverageFormat Format identifier — format = 2
+ # uint16 rangeCount Number of RangeRecords
+ 4
+ # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID.
+ # uint16 startGlyphID First glyph ID in the range
+ # uint16 endGlyphID Last glyph ID in the range
+ # uint16 startCoverageIndex Coverage Index of first glyph ID in range
+ + merged_range_count * 6
+ )
+ return min(format1_bytes, format2_bytes)
+
+ @property
+ def classDef1_bytes(self):
+ # We can skip encoding one of the Class1 definitions, and use
+ # Class1=0 to represent it instead, because Class1 is gated by the
+ # Coverage definition. Use Class1=0 for the highest byte savings.
+ # Going through all options takes too long, pick the biggest class
+ # = what happens in otlLib.builder.ClassDefBuilder.classes()
+ biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i]))
+ return _classDef_bytes(
+ self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index]
+ )
+
+ @property
+ def classDef2_bytes(self):
+ # All Class2 need to be encoded because we can't use Class2=0
+ return _classDef_bytes(self.ctx.all_class2_data, self.column_indices)
+
+
+def cluster_pairs_by_class2_coverage_custom_cost(
+ font: TTFont,
+ pairs: Pairs,
+ compression: int = 5,
+) -> List[Pairs]:
+ if not pairs:
+ # The subtable was actually empty?
+ return [pairs]
+
+ # Sorted for reproducibility/determinism
+ all_class1 = sorted(set(pair[0] for pair in pairs))
+ all_class2 = sorted(set(pair[1] for pair in pairs))
+
+ # Use Python's big ints for binary vectors representing each line
+ lines = [
+ sum(
+ 1 << i if (class1, class2) in pairs else 0
+ for i, class2 in enumerate(all_class2)
+ )
+ for class1 in all_class1
+ ]
+
+ # Map glyph names to ids and work with ints throughout for ClassDef formats
+ name_to_id = font.getReverseGlyphMap()
+ # Each entry in the arrays below is (ranges, min_glyph_id, max_glyph_id)
+ all_class1_data = [
+ _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1
+ ]
+ all_class2_data = [
+ _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2
+ ]
+
+ format1 = 0
+ format2 = 0
+ for pair, value in pairs.items():
+ format1 |= value[0].getEffectiveFormat() if value[0] else 0
+ format2 |= value[1].getEffectiveFormat() if value[1] else 0
+ valueFormat1_bytes = bit_count(format1) * 2
+ valueFormat2_bytes = bit_count(format2) * 2
+
+ ctx = ClusteringContext(
+ lines,
+ all_class1,
+ all_class1_data,
+ all_class2_data,
+ valueFormat1_bytes,
+ valueFormat2_bytes,
+ )
+
+ cluster_cache: Dict[int, Cluster] = {}
+
+ def make_cluster(indices: int) -> Cluster:
+ cluster = cluster_cache.get(indices, None)
+ if cluster is not None:
+ return cluster
+ cluster = Cluster(ctx, indices)
+ cluster_cache[indices] = cluster
+ return cluster
+
+ def merge(cluster: Cluster, other: Cluster) -> Cluster:
+ return make_cluster(cluster.indices_bitmask | other.indices_bitmask)
+
+ # Agglomerative clustering by hand, checking the cost gain of the new
+ # cluster against the previously separate clusters
+ # Start with 1 cluster per line
+ # cluster = set of lines = new subtable
+ clusters = [make_cluster(1 << i) for i in range(len(lines))]
+
+ # Cost of 1 cluster with everything
+ # `(1 << len) - 1` gives a bitmask full of 1's of length `len`
+ cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost
+ log.debug(f" len(clusters) = {len(clusters)}")
+
+ while len(clusters) > 1:
+ lowest_cost_change = None
+ best_cluster_index = None
+ best_other_index = None
+ best_merged = None
+ for i, cluster in enumerate(clusters):
+ for j, other in enumerate(clusters[i + 1 :]):
+ merged = merge(cluster, other)
+ cost_change = merged.cost - cluster.cost - other.cost
+ if lowest_cost_change is None or cost_change < lowest_cost_change:
+ lowest_cost_change = cost_change
+ best_cluster_index = i
+ best_other_index = i + 1 + j
+ best_merged = merged
+ assert lowest_cost_change is not None
+ assert best_cluster_index is not None
+ assert best_other_index is not None
+ assert best_merged is not None
+
+ # If the best merge we found is still taking down the file size, then
+ # there's no question: we must do it, because it's beneficial in both
+ # ways (lower file size and lower number of subtables). However, if the
+ # best merge we found is not reducing file size anymore, then we need to
+ # look at the other stop criteria = the compression factor.
+ if lowest_cost_change > 0:
+ # Stop criteria: check whether we should keep merging.
+ # Compute size reduction brought by splitting
+ cost_after_splitting = sum(c.cost for c in clusters)
+ # size_reduction so that after = before * (1 - size_reduction)
+ # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2
+ size_reduction = 1 - cost_after_splitting / cost_before_splitting
+
+ # Force more merging by taking into account the compression number.
+ # Target behaviour: compression number = 1 to 9, default 5 like gzip
+ # - 1 = accept to add 1 subtable to reduce size by 50%
+ # - 5 = accept to add 5 subtables to reduce size by 50%
+ # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691
+ # Given the size reduction we have achieved so far, compute how many
+ # new subtables are acceptable.
+ max_new_subtables = -log2(1 - size_reduction) * compression
+ log.debug(
+ f" len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}",
+ )
+ if compression == 9:
+ # Override level 9 to mean: create any number of subtables
+ max_new_subtables = len(clusters)
+
+ # If we have managed to take the number of new subtables below the
+ # threshold, then we can stop.
+ if len(clusters) <= max_new_subtables + 1:
+ break
+
+ # No reason to stop yet, do the merge and move on to the next.
+ del clusters[best_other_index]
+ clusters[best_cluster_index] = best_merged
+
+ # All clusters are final; turn bitmasks back into the "Pairs" format
+ pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict)
+ for pair, values in pairs.items():
+ pairs_by_class1[pair[0]][pair] = values
+ pairs_groups: List[Pairs] = []
+ for cluster in clusters:
+ pairs_group: Pairs = dict()
+ for i in cluster.indices:
+ class1 = all_class1[i]
+ pairs_group.update(pairs_by_class1[class1])
+ pairs_groups.append(pairs_group)
+ return pairs_groups
diff --git a/Lib/fontTools/pens/basePen.py b/Lib/fontTools/pens/basePen.py
index 2161e021..e06c00ef 100644
--- a/Lib/fontTools/pens/basePen.py
+++ b/Lib/fontTools/pens/basePen.py
@@ -8,7 +8,7 @@ it is an abstraction for drawing outlines, making sure that outline objects
don't need to know the details about how and where they're being drawn, and
that drawings don't need to know the details of how outlines are stored.
-The most basic pattern is this:
+The most basic pattern is this::
outline.draw(pen) # 'outline' draws itself onto 'pen'
@@ -21,13 +21,13 @@ The AbstractPen class defines the Pen protocol. It implements almost
nothing (only no-op closePath() and endPath() methods), but is useful
for documentation purposes. Subclassing it basically tells the reader:
"this class implements the Pen protocol.". An examples of an AbstractPen
-subclass is fontTools.pens.transformPen.TransformPen.
+subclass is :py:class:`fontTools.pens.transformPen.TransformPen`.
The BasePen class is a base implementation useful for pens that actually
draw (for example a pen renders outlines using a native graphics engine).
BasePen contains a lot of base functionality, making it very easy to build
a pen that fully conforms to the pen protocol. Note that if you subclass
-BasePen, you _don't_ override moveTo(), lineTo(), etc., but _moveTo(),
+BasePen, you *don't* override moveTo(), lineTo(), etc., but _moveTo(),
_lineTo(), etc. See the BasePen doc string for details. Examples of
BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and
fontTools.pens.cocoaPen.CocoaPen.
@@ -40,10 +40,14 @@ from typing import Tuple
from fontTools.misc.loggingTools import LogMixin
-__all__ = ["AbstractPen", "NullPen", "BasePen",
+__all__ = ["AbstractPen", "NullPen", "BasePen", "PenError",
"decomposeSuperBezierSegment", "decomposeQuadraticSegment"]
+class PenError(Exception):
+ """Represents an error during penning."""
+
+
class AbstractPen:
def moveTo(self, pt: Tuple[float, float]) -> None:
@@ -147,7 +151,7 @@ class NullPen(AbstractPen):
class LoggingPen(LogMixin, AbstractPen):
- """A pen with a `log` property (see fontTools.misc.loggingTools.LogMixin)
+ """A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)
"""
pass
diff --git a/Lib/fontTools/pens/boundsPen.py b/Lib/fontTools/pens/boundsPen.py
index 810715ca..227c22f5 100644
--- a/Lib/fontTools/pens/boundsPen.py
+++ b/Lib/fontTools/pens/boundsPen.py
@@ -14,10 +14,11 @@ class ControlBoundsPen(BasePen):
on their extremes.
When the shape has been drawn, the bounds are available as the
- 'bounds' attribute of the pen object. It's a 4-tuple:
+ ``bounds`` attribute of the pen object. It's a 4-tuple::
+
(xMin, yMin, xMax, yMax).
- If 'ignoreSinglePoints' is True, single points are ignored.
+ If ``ignoreSinglePoints`` is True, single points are ignored.
"""
def __init__(self, glyphSet, ignoreSinglePoints=False):
@@ -73,7 +74,8 @@ class BoundsPen(ControlBoundsPen):
than the "control bounds".
When the shape has been drawn, the bounds are available as the
- 'bounds' attribute of the pen object. It's a 4-tuple:
+ ``bounds`` attribute of the pen object. It's a 4-tuple::
+
(xMin, yMin, xMax, yMax)
"""
diff --git a/Lib/fontTools/pens/cu2quPen.py b/Lib/fontTools/pens/cu2quPen.py
index 497585bc..3c4ceae9 100644
--- a/Lib/fontTools/pens/cu2quPen.py
+++ b/Lib/fontTools/pens/cu2quPen.py
@@ -23,13 +23,15 @@ class Cu2QuPen(AbstractPen):
""" A filter pen to convert cubic bezier curves to quadratic b-splines
using the FontTools SegmentPen protocol.
- other_pen: another SegmentPen used to draw the transformed outline.
- max_err: maximum approximation error in font units. For optimal results,
- if you know the UPEM of the font, we recommend setting this to a
- value equal, or close to UPEM / 1000.
- reverse_direction: flip the contours' direction but keep starting point.
- stats: a dictionary counting the point numbers of quadratic segments.
- ignore_single_points: don't emit contours containing only a single point
+ Args:
+
+ other_pen: another SegmentPen used to draw the transformed outline.
+ max_err: maximum approximation error in font units. For optimal results,
+ if you know the UPEM of the font, we recommend setting this to a
+ value equal, or close to UPEM / 1000.
+ reverse_direction: flip the contours' direction but keep starting point.
+ stats: a dictionary counting the point numbers of quadratic segments.
+ ignore_single_points: don't emit contours containing only a single point
NOTE: The "ignore_single_points" argument is deprecated since v1.3.0,
which dropped Robofab subpport. It's no longer needed to special-case
@@ -138,12 +140,13 @@ class Cu2QuPointPen(BasePointToSegmentPen):
""" A filter pen to convert cubic bezier curves to quadratic b-splines
using the RoboFab PointPen protocol.
- other_point_pen: another PointPen used to draw the transformed outline.
- max_err: maximum approximation error in font units. For optimal results,
- if you know the UPEM of the font, we recommend setting this to a
- value equal, or close to UPEM / 1000.
- reverse_direction: reverse the winding direction of all contours.
- stats: a dictionary counting the point numbers of quadratic segments.
+ Args:
+ other_point_pen: another PointPen used to draw the transformed outline.
+ max_err: maximum approximation error in font units. For optimal results,
+ if you know the UPEM of the font, we recommend setting this to a
+ value equal, or close to UPEM / 1000.
+ reverse_direction: reverse the winding direction of all contours.
+ stats: a dictionary counting the point numbers of quadratic segments.
"""
def __init__(self, other_point_pen, max_err, reverse_direction=False,
diff --git a/Lib/fontTools/pens/freetypePen.py b/Lib/fontTools/pens/freetypePen.py
new file mode 100644
index 00000000..870776bc
--- /dev/null
+++ b/Lib/fontTools/pens/freetypePen.py
@@ -0,0 +1,458 @@
+# -*- coding: utf-8 -*-
+
+"""Pen to rasterize paths with FreeType."""
+
+__all__ = ["FreeTypePen"]
+
+import os
+import ctypes
+import platform
+import subprocess
+import collections
+import math
+
+import freetype
+from freetype.raw import FT_Outline_Get_Bitmap, FT_Outline_Get_BBox, FT_Outline_Get_CBox
+from freetype.ft_types import FT_Pos
+from freetype.ft_structs import FT_Vector, FT_BBox, FT_Bitmap, FT_Outline
+from freetype.ft_enums import (
+ FT_OUTLINE_NONE,
+ FT_OUTLINE_EVEN_ODD_FILL,
+ FT_PIXEL_MODE_GRAY,
+ FT_CURVE_TAG_ON,
+ FT_CURVE_TAG_CONIC,
+ FT_CURVE_TAG_CUBIC,
+)
+from freetype.ft_errors import FT_Exception
+
+from fontTools.pens.basePen import BasePen, PenError
+from fontTools.misc.roundTools import otRound
+from fontTools.misc.transform import Transform
+
+Contour = collections.namedtuple("Contour", ("points", "tags"))
+
+
+class FreeTypePen(BasePen):
+ """Pen to rasterize paths with FreeType. Requires `freetype-py` module.
+
+ Constructs ``FT_Outline`` from the paths, and renders it within a bitmap
+ buffer.
+
+ For ``array()`` and ``show()``, `numpy` and `matplotlib` must be installed.
+ For ``image()``, `Pillow` is required. Each module is lazily loaded when the
+ corresponding method is called.
+
+ Args:
+ glyphSet: a dictionary of drawable glyph objects keyed by name
+ used to resolve component references in composite glyphs.
+
+ :Examples:
+ If `numpy` and `matplotlib` are available, the following code will
+ show the glyph image of `fi` in a new window::
+
+ from fontTools.ttLib import TTFont
+ from fontTools.pens.freetypePen import FreeTypePen
+ from fontTools.misc.transform import Offset
+ pen = FreeTypePen(None)
+ font = TTFont('SourceSansPro-Regular.otf')
+ glyph = font.getGlyphSet()['fi']
+ glyph.draw(pen)
+ width, ascender, descender = glyph.width, font['OS/2'].usWinAscent, -font['OS/2'].usWinDescent
+ height = ascender - descender
+ pen.show(width=width, height=height, transform=Offset(0, -descender))
+
+ Combining with `uharfbuzz`, you can typeset a chunk of glyphs in a pen::
+
+ import uharfbuzz as hb
+ from fontTools.pens.freetypePen import FreeTypePen
+ from fontTools.pens.transformPen import TransformPen
+ from fontTools.misc.transform import Offset
+
+ en1, en2, ar, ja = 'Typesetting', 'Jeff', 'صف الحروف', 'たいぷせっと'
+ for text, font_path, direction, typo_ascender, typo_descender, vhea_ascender, vhea_descender, contain, features in (
+ (en1, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, False, {"kern": True, "liga": True}),
+ (en2, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, True, {"kern": True, "liga": True}),
+ (ar, 'NotoSansArabic-Regular.ttf', 'rtl', 1374, -738, None, None, False, {"kern": True, "liga": True}),
+ (ja, 'NotoSansJP-Regular.otf', 'ltr', 880, -120, 500, -500, False, {"palt": True, "kern": True}),
+ (ja, 'NotoSansJP-Regular.otf', 'ttb', 880, -120, 500, -500, False, {"vert": True, "vpal": True, "vkrn": True})
+ ):
+ blob = hb.Blob.from_file_path(font_path)
+ face = hb.Face(blob)
+ font = hb.Font(face)
+ buf = hb.Buffer()
+ buf.direction = direction
+ buf.add_str(text)
+ buf.guess_segment_properties()
+ hb.shape(font, buf, features)
+
+ x, y = 0, 0
+ pen = FreeTypePen(None)
+ for info, pos in zip(buf.glyph_infos, buf.glyph_positions):
+ gid = info.codepoint
+ transformed = TransformPen(pen, Offset(x + pos.x_offset, y + pos.y_offset))
+ font.draw_glyph_with_pen(gid, transformed)
+ x += pos.x_advance
+ y += pos.y_advance
+
+ offset, width, height = None, None, None
+ if direction in ('ltr', 'rtl'):
+ offset = (0, -typo_descender)
+ width = x
+ height = typo_ascender - typo_descender
+ else:
+ offset = (-vhea_descender, -y)
+ width = vhea_ascender - vhea_descender
+ height = -y
+ pen.show(width=width, height=height, transform=Offset(*offset), contain=contain)
+
+ For Jupyter Notebook, the rendered image will be displayed in a cell if
+ you replace ``show()`` with ``image()`` in the examples.
+ """
+
+ def __init__(self, glyphSet):
+ BasePen.__init__(self, glyphSet)
+ self.contours = []
+
+ def outline(self, transform=None, evenOdd=False):
+ """Converts the current contours to ``FT_Outline``.
+
+ Args:
+ transform: An optional 6-tuple containing an affine transformation,
+ or a ``Transform`` object from the ``fontTools.misc.transform``
+ module.
+ evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
+ """
+ transform = transform or Transform()
+ if not hasattr(transform, "transformPoint"):
+ transform = Transform(*transform)
+ n_contours = len(self.contours)
+ n_points = sum((len(contour.points) for contour in self.contours))
+ points = []
+ for contour in self.contours:
+ for point in contour.points:
+ point = transform.transformPoint(point)
+ points.append(
+ FT_Vector(
+ FT_Pos(otRound(point[0] * 64)), FT_Pos(otRound(point[1] * 64))
+ )
+ )
+ tags = []
+ for contour in self.contours:
+ for tag in contour.tags:
+ tags.append(tag)
+ contours = []
+ contours_sum = 0
+ for contour in self.contours:
+ contours_sum += len(contour.points)
+ contours.append(contours_sum - 1)
+ flags = FT_OUTLINE_EVEN_ODD_FILL if evenOdd else FT_OUTLINE_NONE
+ return FT_Outline(
+ (ctypes.c_short)(n_contours),
+ (ctypes.c_short)(n_points),
+ (FT_Vector * n_points)(*points),
+ (ctypes.c_ubyte * n_points)(*tags),
+ (ctypes.c_short * n_contours)(*contours),
+ (ctypes.c_int)(flags),
+ )
+
+ def buffer(
+ self, width=None, height=None, transform=None, contain=False, evenOdd=False
+ ):
+ """Renders the current contours within a bitmap buffer.
+
+ Args:
+ width: Image width of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ height: Image height of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ transform: An optional 6-tuple containing an affine transformation,
+ or a ``Transform`` object from the ``fontTools.misc.transform``
+ module. The bitmap size is not affected by this matrix.
+ contain: If ``True``, the image size will be automatically expanded
+ so that it fits to the bounding box of the paths. Useful for
+ rendering glyphs with negative sidebearings without clipping.
+ evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
+
+ Returns:
+ A tuple of ``(buffer, size)``, where ``buffer`` is a ``bytes``
+ object of the resulting bitmap and ``size`` is a 2-tuple of its
+ dimension.
+
+ :Notes:
+ The image size should always be given explicitly if you need to get
+ a proper glyph image. When ``width`` and ``height`` are omitted, it
+ forcefully fits to the bounding box and the side bearings get
+ cropped. If you pass ``0`` to both ``width`` and ``height`` and set
+ ``contain`` to ``True``, it expands to the bounding box while
+ maintaining the origin of the contours, meaning that LSB will be
+ maintained but RSB won’t. The difference between the two becomes
+ more obvious when rotate or skew transformation is applied.
+
+ :Example:
+ .. code-block::
+
+ >> pen = FreeTypePen(None)
+ >> glyph.draw(pen)
+ >> buf, size = pen.buffer(width=500, height=1000)
+ >> type(buf), len(buf), size
+ (<class 'bytes'>, 500000, (500, 1000))
+
+ """
+ transform = transform or Transform()
+ if not hasattr(transform, "transformPoint"):
+ transform = Transform(*transform)
+ contain_x, contain_y = contain or width is None, contain or height is None
+ if contain_x or contain_y:
+ dx, dy = transform.dx, transform.dy
+ bbox = self.bbox
+ p1, p2, p3, p4 = (
+ transform.transformPoint((bbox[0], bbox[1])),
+ transform.transformPoint((bbox[2], bbox[1])),
+ transform.transformPoint((bbox[0], bbox[3])),
+ transform.transformPoint((bbox[2], bbox[3])),
+ )
+ px, py = (p1[0], p2[0], p3[0], p4[0]), (p1[1], p2[1], p3[1], p4[1])
+ if contain_x:
+ if width is None:
+ dx = dx - min(*px)
+ width = max(*px) - min(*px)
+ else:
+ dx = dx - min(min(*px), 0.0)
+ width = max(width, max(*px) - min(min(*px), 0.0))
+ if contain_y:
+ if height is None:
+ dy = dy - min(*py)
+ height = max(*py) - min(*py)
+ else:
+ dy = dy - min(min(*py), 0.0)
+ height = max(height, max(*py) - min(min(*py), 0.0))
+ transform = Transform(*transform[:4], dx, dy)
+ width, height = math.ceil(width), math.ceil(height)
+ buf = ctypes.create_string_buffer(width * height)
+ bitmap = FT_Bitmap(
+ (ctypes.c_int)(height),
+ (ctypes.c_int)(width),
+ (ctypes.c_int)(width),
+ (ctypes.POINTER(ctypes.c_ubyte))(buf),
+ (ctypes.c_short)(256),
+ (ctypes.c_ubyte)(FT_PIXEL_MODE_GRAY),
+ (ctypes.c_char)(0),
+ (ctypes.c_void_p)(None),
+ )
+ outline = self.outline(transform=transform, evenOdd=evenOdd)
+ err = FT_Outline_Get_Bitmap(
+ freetype.get_handle(), ctypes.byref(outline), ctypes.byref(bitmap)
+ )
+ if err != 0:
+ raise FT_Exception(err)
+ return buf.raw, (width, height)
+
+ def array(
+ self, width=None, height=None, transform=None, contain=False, evenOdd=False
+ ):
+ """Returns the rendered contours as a numpy array. Requires `numpy`.
+
+ Args:
+ width: Image width of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ height: Image height of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ transform: An optional 6-tuple containing an affine transformation,
+ or a ``Transform`` object from the ``fontTools.misc.transform``
+ module. The bitmap size is not affected by this matrix.
+ contain: If ``True``, the image size will be automatically expanded
+ so that it fits to the bounding box of the paths. Useful for
+ rendering glyphs with negative sidebearings without clipping.
+ evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
+
+ Returns:
+ A ``numpy.ndarray`` object with a shape of ``(height, width)``.
+ Each element takes a value in the range of ``[0.0, 1.0]``.
+
+ :Notes:
+ The image size should always be given explicitly if you need to get
+ a proper glyph image. When ``width`` and ``height`` are omitted, it
+ forcefully fits to the bounding box and the side bearings get
+ cropped. If you pass ``0`` to both ``width`` and ``height`` and set
+ ``contain`` to ``True``, it expands to the bounding box while
+ maintaining the origin of the contours, meaning that LSB will be
+ maintained but RSB won’t. The difference between the two becomes
+ more obvious when rotate or skew transformation is applied.
+
+ :Example:
+ .. code-block::
+
+ >> pen = FreeTypePen(None)
+ >> glyph.draw(pen)
+ >> arr = pen.array(width=500, height=1000)
+ >> type(a), a.shape
+ (<class 'numpy.ndarray'>, (1000, 500))
+ """
+ import numpy as np
+
+ buf, size = self.buffer(
+ width=width,
+ height=height,
+ transform=transform,
+ contain=contain,
+ evenOdd=evenOdd,
+ )
+ return np.frombuffer(buf, "B").reshape((size[1], size[0])) / 255.0
+
+ def show(
+ self, width=None, height=None, transform=None, contain=False, evenOdd=False
+ ):
+ """Plots the rendered contours with `pyplot`. Requires `numpy` and
+ `matplotlib`.
+
+ Args:
+ width: Image width of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ height: Image height of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ transform: An optional 6-tuple containing an affine transformation,
+ or a ``Transform`` object from the ``fontTools.misc.transform``
+ module. The bitmap size is not affected by this matrix.
+ contain: If ``True``, the image size will be automatically expanded
+ so that it fits to the bounding box of the paths. Useful for
+ rendering glyphs with negative sidebearings without clipping.
+ evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
+
+ :Notes:
+ The image size should always be given explicitly if you need to get
+ a proper glyph image. When ``width`` and ``height`` are omitted, it
+ forcefully fits to the bounding box and the side bearings get
+ cropped. If you pass ``0`` to both ``width`` and ``height`` and set
+ ``contain`` to ``True``, it expands to the bounding box while
+ maintaining the origin of the contours, meaning that LSB will be
+ maintained but RSB won’t. The difference between the two becomes
+ more obvious when rotate or skew transformation is applied.
+
+ :Example:
+ .. code-block::
+
+ >> pen = FreeTypePen(None)
+ >> glyph.draw(pen)
+ >> pen.show(width=500, height=1000)
+ """
+ from matplotlib import pyplot as plt
+
+ a = self.array(
+ width=width,
+ height=height,
+ transform=transform,
+ contain=contain,
+ evenOdd=evenOdd,
+ )
+ plt.imshow(a, cmap="gray_r", vmin=0, vmax=1)
+ plt.show()
+
+ def image(
+ self, width=None, height=None, transform=None, contain=False, evenOdd=False
+ ):
+ """Returns the rendered contours as a PIL image. Requires `Pillow`.
+ Can be used to display a glyph image in Jupyter Notebook.
+
+ Args:
+ width: Image width of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ height: Image height of the bitmap in pixels. If omitted, it
+ automatically fits to the bounding box of the contours.
+ transform: An optional 6-tuple containing an affine transformation,
+ or a ``Transform`` object from the ``fontTools.misc.transform``
+ module. The bitmap size is not affected by this matrix.
+ contain: If ``True``, the image size will be automatically expanded
+ so that it fits to the bounding box of the paths. Useful for
+ rendering glyphs with negative sidebearings without clipping.
+ evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
+
+ Returns:
+ A ``PIL.image`` object. The image is filled in black with alpha
+ channel obtained from the rendered bitmap.
+
+ :Notes:
+ The image size should always be given explicitly if you need to get
+ a proper glyph image. When ``width`` and ``height`` are omitted, it
+ forcefully fits to the bounding box and the side bearings get
+ cropped. If you pass ``0`` to both ``width`` and ``height`` and set
+ ``contain`` to ``True``, it expands to the bounding box while
+ maintaining the origin of the contours, meaning that LSB will be
+ maintained but RSB won’t. The difference between the two becomes
+ more obvious when rotate or skew transformation is applied.
+
+ :Example:
+ .. code-block::
+
+ >> pen = FreeTypePen(None)
+ >> glyph.draw(pen)
+ >> img = pen.image(width=500, height=1000)
+ >> type(img), img.size
+ (<class 'PIL.Image.Image'>, (500, 1000))
+ """
+ from PIL import Image
+
+ buf, size = self.buffer(
+ width=width,
+ height=height,
+ transform=transform,
+ contain=contain,
+ evenOdd=evenOdd,
+ )
+ img = Image.new("L", size, 0)
+ img.putalpha(Image.frombuffer("L", size, buf))
+ return img
+
+ @property
+ def bbox(self):
+ """Computes the exact bounding box of an outline.
+
+ Returns:
+ A tuple of ``(xMin, yMin, xMax, yMax)``.
+ """
+ bbox = FT_BBox()
+ outline = self.outline()
+ FT_Outline_Get_BBox(ctypes.byref(outline), ctypes.byref(bbox))
+ return (bbox.xMin / 64.0, bbox.yMin / 64.0, bbox.xMax / 64.0, bbox.yMax / 64.0)
+
+ @property
+ def cbox(self):
+ """Returns an outline's ‘control box’.
+
+ Returns:
+ A tuple of ``(xMin, yMin, xMax, yMax)``.
+ """
+ cbox = FT_BBox()
+ outline = self.outline()
+ FT_Outline_Get_CBox(ctypes.byref(outline), ctypes.byref(cbox))
+ return (cbox.xMin / 64.0, cbox.yMin / 64.0, cbox.xMax / 64.0, cbox.yMax / 64.0)
+
+ def _moveTo(self, pt):
+ contour = Contour([], [])
+ self.contours.append(contour)
+ contour.points.append(pt)
+ contour.tags.append(FT_CURVE_TAG_ON)
+
+ def _lineTo(self, pt):
+ if not (self.contours and len(self.contours[-1].points) > 0):
+ raise PenError("Contour missing required initial moveTo")
+ contour = self.contours[-1]
+ contour.points.append(pt)
+ contour.tags.append(FT_CURVE_TAG_ON)
+
+ def _curveToOne(self, p1, p2, p3):
+ if not (self.contours and len(self.contours[-1].points) > 0):
+ raise PenError("Contour missing required initial moveTo")
+ t1, t2, t3 = FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_ON
+ contour = self.contours[-1]
+ for p, t in ((p1, t1), (p2, t2), (p3, t3)):
+ contour.points.append(p)
+ contour.tags.append(t)
+
+ def _qCurveToOne(self, p1, p2):
+ if not (self.contours and len(self.contours[-1].points) > 0):
+ raise PenError("Contour missing required initial moveTo")
+ t1, t2 = FT_CURVE_TAG_CONIC, FT_CURVE_TAG_ON
+ contour = self.contours[-1]
+ for p, t in ((p1, t1), (p2, t2)):
+ contour.points.append(p)
+ contour.tags.append(t)
diff --git a/Lib/fontTools/pens/pointPen.py b/Lib/fontTools/pens/pointPen.py
index 26f99d41..4c3148bf 100644
--- a/Lib/fontTools/pens/pointPen.py
+++ b/Lib/fontTools/pens/pointPen.py
@@ -15,7 +15,7 @@ For instance, whether or not a point is smooth, and its name.
import math
from typing import Any, Optional, Tuple
-from fontTools.pens.basePen import AbstractPen
+from fontTools.pens.basePen import AbstractPen, PenError
__all__ = [
"AbstractPointPen",
@@ -74,7 +74,8 @@ class BasePointToSegmentPen(AbstractPointPen):
self.currentPath = None
def beginPath(self, identifier=None, **kwargs):
- assert self.currentPath is None
+ if self.currentPath is not None:
+ raise PenError("Path already begun.")
self.currentPath = []
def _flushContour(self, segments):
@@ -106,7 +107,8 @@ class BasePointToSegmentPen(AbstractPointPen):
raise NotImplementedError
def endPath(self):
- assert self.currentPath is not None
+ if self.currentPath is None:
+ raise PenError("Path not begun.")
points = self.currentPath
self.currentPath = None
if not points:
@@ -154,6 +156,8 @@ class BasePointToSegmentPen(AbstractPointPen):
def addPoint(self, pt, segmentType=None, smooth=False, name=None,
identifier=None, **kwargs):
+ if self.currentPath is None:
+ raise PenError("Path not begun")
self.currentPath.append((pt, segmentType, smooth, name, kwargs))
@@ -161,6 +165,9 @@ class PointToSegmentPen(BasePointToSegmentPen):
"""
Adapter class that converts the PointPen protocol to the
(Segment)Pen protocol.
+
+ NOTE: The segment pen does not support and will drop point names, identifiers
+ and kwargs.
"""
def __init__(self, segmentPen, outputImpliedClosingLine=False):
@@ -169,21 +176,23 @@ class PointToSegmentPen(BasePointToSegmentPen):
self.outputImpliedClosingLine = outputImpliedClosingLine
def _flushContour(self, segments):
- assert len(segments) >= 1
+ if not segments:
+ raise PenError("Must have at least one segment.")
pen = self.pen
if segments[0][0] == "move":
# It's an open path.
closed = False
points = segments[0][1]
- assert len(points) == 1, "illegal move segment point count: %d" % len(points)
- movePt, smooth, name, kwargs = points[0]
+ if len(points) != 1:
+ raise PenError(f"Illegal move segment point count: {len(points)}")
+ movePt, _, _ , _ = points[0]
del segments[0]
else:
# It's a closed path, do a moveTo to the last
# point of the last segment.
closed = True
segmentType, points = segments[-1]
- movePt, smooth, name, kwargs = points[-1]
+ movePt, _, _ , _ = points[-1]
if movePt is None:
# quad special case: a contour with no on-curve points contains
# one "qcurve" segment that ends with a point that's None. We
@@ -196,9 +205,10 @@ class PointToSegmentPen(BasePointToSegmentPen):
lastPt = movePt
for i in range(nSegments):
segmentType, points = segments[i]
- points = [pt for pt, smooth, name, kwargs in points]
+ points = [pt for pt, _, _ , _ in points]
if segmentType == "line":
- assert len(points) == 1, "illegal line segment point count: %d" % len(points)
+ if len(points) != 1:
+ raise PenError(f"Illegal line segment point count: {len(points)}")
pt = points[0]
# For closed contours, a 'lineTo' is always implied from the last oncurve
# point to the starting point, thus we can omit it when the last and
@@ -224,7 +234,7 @@ class PointToSegmentPen(BasePointToSegmentPen):
pen.qCurveTo(*points)
lastPt = points[-1]
else:
- assert 0, "illegal segmentType: %s" % segmentType
+ raise PenError(f"Illegal segmentType: {segmentType}")
if closed:
pen.closePath()
else:
@@ -232,6 +242,7 @@ class PointToSegmentPen(BasePointToSegmentPen):
def addComponent(self, glyphName, transform, identifier=None, **kwargs):
del identifier # unused
+ del kwargs # unused
self.pen.addComponent(glyphName, transform)
@@ -260,27 +271,35 @@ class SegmentToPointPen(AbstractPen):
self.contour.append((pt, "move"))
def lineTo(self, pt):
- assert self.contour is not None, "contour missing required initial moveTo"
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
self.contour.append((pt, "line"))
def curveTo(self, *pts):
- assert self.contour is not None, "contour missing required initial moveTo"
+ if not pts:
+ raise TypeError("Must pass in at least one point")
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
for pt in pts[:-1]:
self.contour.append((pt, None))
self.contour.append((pts[-1], "curve"))
def qCurveTo(self, *pts):
+ if not pts:
+ raise TypeError("Must pass in at least one point")
if pts[-1] is None:
self.contour = []
else:
- assert self.contour is not None, "contour missing required initial moveTo"
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
for pt in pts[:-1]:
self.contour.append((pt, None))
if pts[-1] is not None:
self.contour.append((pts[-1], "qcurve"))
def closePath(self):
- assert self.contour is not None, "contour missing required initial moveTo"
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
self.contour[0] = self.contour[-1]
del self.contour[-1]
@@ -294,12 +313,14 @@ class SegmentToPointPen(AbstractPen):
self.contour = None
def endPath(self):
- assert self.contour is not None, "contour missing required initial moveTo"
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
self._flushContour()
self.contour = None
def addComponent(self, glyphName, transform):
- assert self.contour is None
+ if self.contour is not None:
+ raise PenError("Components must be added before or after contours")
self.pen.addComponent(glyphName, transform)
@@ -309,11 +330,14 @@ class GuessSmoothPointPen(AbstractPointPen):
should be "smooth", ie. that it's a "tangent" point or a "curve" point.
"""
- def __init__(self, outPen):
+ def __init__(self, outPen, error=0.05):
self._outPen = outPen
+ self._error = error
self._points = None
def _flushContour(self):
+ if self._points is None:
+ raise PenError("Path not begun")
points = self._points
nPoints = len(points)
if not nPoints:
@@ -329,7 +353,7 @@ class GuessSmoothPointPen(AbstractPointPen):
# closed path containing 1 point (!), ignore.
indices = []
for i in indices:
- pt, segmentType, dummy, name, kwargs = points[i]
+ pt, segmentType, _, name, kwargs = points[i]
if segmentType is None:
continue
prev = i - 1
@@ -343,16 +367,17 @@ class GuessSmoothPointPen(AbstractPointPen):
if pt != prevPt and pt != nextPt:
dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
- a1 = math.atan2(dx1, dy1)
- a2 = math.atan2(dx2, dy2)
- if abs(a1 - a2) < 0.05:
+ a1 = math.atan2(dy1, dx1)
+ a2 = math.atan2(dy2, dx2)
+ if abs(a1 - a2) < self._error:
points[i] = pt, segmentType, True, name, kwargs
for pt, segmentType, smooth, name, kwargs in points:
self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)
def beginPath(self, identifier=None, **kwargs):
- assert self._points is None
+ if self._points is not None:
+ raise PenError("Path already begun")
self._points = []
if identifier is not None:
kwargs["identifier"] = identifier
@@ -365,12 +390,15 @@ class GuessSmoothPointPen(AbstractPointPen):
def addPoint(self, pt, segmentType=None, smooth=False, name=None,
identifier=None, **kwargs):
+ if self._points is None:
+ raise PenError("Path not begun")
if identifier is not None:
kwargs["identifier"] = identifier
self._points.append((pt, segmentType, False, name, kwargs))
def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
- assert self._points is None
+ if self._points is not None:
+ raise PenError("Components must be added before or after contours")
if identifier is not None:
kwargs["identifier"] = identifier
self._outPen.addComponent(glyphName, transformation, **kwargs)
@@ -440,19 +468,26 @@ class ReverseContourPointPen(AbstractPointPen):
pen.endPath()
def beginPath(self, identifier=None, **kwargs):
- assert self.currentContour is None
+ if self.currentContour is not None:
+ raise PenError("Path already begun")
self.currentContour = []
self.currentContourIdentifier = identifier
self.onCurve = []
def endPath(self):
- assert self.currentContour is not None
+ if self.currentContour is None:
+ raise PenError("Path not begun")
self._flushContour()
self.currentContour = None
- def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
+ def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
+ if self.currentContour is None:
+ raise PenError("Path not begun")
+ if identifier is not None:
+ kwargs["identifier"] = identifier
self.currentContour.append((pt, segmentType, smooth, name, kwargs))
def addComponent(self, glyphName, transform, identifier=None, **kwargs):
- assert self.currentContour is None
+ if self.currentContour is not None:
+ raise PenError("Components must be added before or after contours")
self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)
diff --git a/Lib/fontTools/pens/recordingPen.py b/Lib/fontTools/pens/recordingPen.py
index 99e87e5a..70f05e83 100644
--- a/Lib/fontTools/pens/recordingPen.py
+++ b/Lib/fontTools/pens/recordingPen.py
@@ -29,21 +29,21 @@ class RecordingPen(AbstractPen):
The recording can be accessed as pen.value; or replayed using
pen.replay(otherPen).
- Usage example:
- ==============
- from fontTools.ttLib import TTFont
- from fontTools.pens.recordingPen import RecordingPen
+ :Example:
- glyph_name = 'dollar'
- font_path = 'MyFont.otf'
+ from fontTools.ttLib import TTFont
+ from fontTools.pens.recordingPen import RecordingPen
- font = TTFont(font_path)
- glyphset = font.getGlyphSet()
- glyph = glyphset[glyph_name]
+ glyph_name = 'dollar'
+ font_path = 'MyFont.otf'
- pen = RecordingPen()
- glyph.draw(pen)
- print(pen.value)
+ font = TTFont(font_path)
+ glyphset = font.getGlyphSet()
+ glyph = glyphset[glyph_name]
+
+ pen = RecordingPen()
+ glyph.draw(pen)
+ print(pen.value)
"""
def __init__(self):
@@ -72,23 +72,23 @@ class DecomposingRecordingPen(DecomposingPen, RecordingPen):
The constructor takes a single 'glyphSet' positional argument,
a dictionary of glyph objects (i.e. with a 'draw' method) keyed
- by thir name.
-
- >>> class SimpleGlyph(object):
- ... def draw(self, pen):
- ... pen.moveTo((0, 0))
- ... pen.curveTo((1, 1), (2, 2), (3, 3))
- ... pen.closePath()
- >>> class CompositeGlyph(object):
- ... def draw(self, pen):
- ... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
- >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
- >>> for name, glyph in sorted(glyphSet.items()):
- ... pen = DecomposingRecordingPen(glyphSet)
- ... glyph.draw(pen)
- ... print("{}: {}".format(name, pen.value))
- a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
- b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
+ by their name::
+
+ >>> class SimpleGlyph(object):
+ ... def draw(self, pen):
+ ... pen.moveTo((0, 0))
+ ... pen.curveTo((1, 1), (2, 2), (3, 3))
+ ... pen.closePath()
+ >>> class CompositeGlyph(object):
+ ... def draw(self, pen):
+ ... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
+ >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
+ >>> for name, glyph in sorted(glyphSet.items()):
+ ... pen = DecomposingRecordingPen(glyphSet)
+ ... glyph.draw(pen)
+ ... print("{}: {}".format(name, pen.value))
+ a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
+ b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
"""
# raises KeyError if base glyph is not found in glyphSet
skipMissingComponents = False
@@ -100,38 +100,44 @@ class RecordingPointPen(AbstractPointPen):
The recording can be accessed as pen.value; or replayed using
pointPen.replay(otherPointPen).
- Usage example:
- ==============
- from defcon import Font
- from fontTools.pens.recordingPen import RecordingPointPen
+ :Example:
+
+ from defcon import Font
+ from fontTools.pens.recordingPen import RecordingPointPen
- glyph_name = 'a'
- font_path = 'MyFont.ufo'
+ glyph_name = 'a'
+ font_path = 'MyFont.ufo'
- font = Font(font_path)
- glyph = font[glyph_name]
+ font = Font(font_path)
+ glyph = font[glyph_name]
- pen = RecordingPointPen()
- glyph.drawPoints(pen)
- print(pen.value)
+ pen = RecordingPointPen()
+ glyph.drawPoints(pen)
+ print(pen.value)
- new_glyph = font.newGlyph('b')
- pen.replay(new_glyph.getPointPen())
+ new_glyph = font.newGlyph('b')
+ pen.replay(new_glyph.getPointPen())
"""
def __init__(self):
self.value = []
- def beginPath(self, **kwargs):
+ def beginPath(self, identifier=None, **kwargs):
+ if identifier is not None:
+ kwargs["identifier"] = identifier
self.value.append(("beginPath", (), kwargs))
def endPath(self):
self.value.append(("endPath", (), {}))
- def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
+ def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
+ if identifier is not None:
+ kwargs["identifier"] = identifier
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
- def addComponent(self, baseGlyphName, transformation, **kwargs):
+ def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
+ if identifier is not None:
+ kwargs["identifier"] = identifier
self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
def replay(self, pointPen):
diff --git a/Lib/fontTools/pens/reportLabPen.py b/Lib/fontTools/pens/reportLabPen.py
index c0a4610b..43217d42 100644
--- a/Lib/fontTools/pens/reportLabPen.py
+++ b/Lib/fontTools/pens/reportLabPen.py
@@ -7,7 +7,7 @@ __all__ = ["ReportLabPen"]
class ReportLabPen(BasePen):
- """A pen for drawing onto a reportlab.graphics.shapes.Path object."""
+ """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
diff --git a/Lib/fontTools/pens/svgPathPen.py b/Lib/fontTools/pens/svgPathPen.py
index 4352ba47..e92737e3 100644
--- a/Lib/fontTools/pens/svgPathPen.py
+++ b/Lib/fontTools/pens/svgPathPen.py
@@ -1,18 +1,36 @@
+from typing import Callable
from fontTools.pens.basePen import BasePen
-def pointToString(pt):
- return " ".join([str(i) for i in pt])
+def pointToString(pt, ntos=str):
+ return " ".join(ntos(i) for i in pt)
class SVGPathPen(BasePen):
+ """ Pen to draw SVG path d commands.
- def __init__(self, glyphSet):
+ Example::
+ >>> pen = SVGPathPen(None)
+ >>> pen.moveTo((0, 0))
+ >>> pen.lineTo((1, 1))
+ >>> pen.curveTo((2, 2), (3, 3), (4, 4))
+ >>> pen.closePath()
+ >>> pen.getCommands()
+ 'M0 0 1 1C2 2 3 3 4 4Z'
+
+ Args:
+ glyphSet: a dictionary of drawable glyph objects keyed by name
+ used to resolve component references in composite glyphs.
+ ntos: a callable that takes a number and returns a string, to
+ customize how numbers are formatted (default: str).
+ """
+ def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
BasePen.__init__(self, glyphSet)
self._commands = []
self._lastCommand = None
self._lastX = None
self._lastY = None
+ self._ntos = ntos
def _handleAnchor(self):
"""
@@ -43,7 +61,7 @@ class SVGPathPen(BasePen):
['M0 10']
"""
self._handleAnchor()
- t = "M%s" % (pointToString(pt))
+ t = "M%s" % (pointToString(pt, self._ntos))
self._commands.append(t)
self._lastCommand = "M"
self._lastX, self._lastY = pt
@@ -91,19 +109,19 @@ class SVGPathPen(BasePen):
# vertical line
elif x == self._lastX:
cmd = "V"
- pts = str(y)
+ pts = self._ntos(y)
# horizontal line
elif y == self._lastY:
cmd = "H"
- pts = str(x)
+ pts = self._ntos(x)
# previous was a moveto
elif self._lastCommand == "M":
cmd = None
- pts = " " + pointToString(pt)
+ pts = " " + pointToString(pt, self._ntos)
# basic
else:
cmd = "L"
- pts = pointToString(pt)
+ pts = pointToString(pt, self._ntos)
# write the string
t = ""
if cmd:
@@ -122,9 +140,9 @@ class SVGPathPen(BasePen):
['C10 20 30 40 50 60']
"""
t = "C"
- t += pointToString(pt1) + " "
- t += pointToString(pt2) + " "
- t += pointToString(pt3)
+ t += pointToString(pt1, self._ntos) + " "
+ t += pointToString(pt2, self._ntos) + " "
+ t += pointToString(pt3, self._ntos)
self._commands.append(t)
self._lastCommand = "C"
self._lastX, self._lastY = pt3
@@ -135,11 +153,16 @@ class SVGPathPen(BasePen):
>>> pen.qCurveTo((10, 20), (30, 40))
>>> pen._commands
['Q10 20 30 40']
+ >>> from fontTools.misc.roundTools import otRound
+ >>> pen = SVGPathPen(None, ntos=lambda v: str(otRound(v)))
+ >>> pen.qCurveTo((3, 3), (7, 5), (11, 4))
+ >>> pen._commands
+ ['Q3 3 5 4', 'Q7 5 11 4']
"""
assert pt2 is not None
t = "Q"
- t += pointToString(pt1) + " "
- t += pointToString(pt2)
+ t += pointToString(pt1, self._ntos) + " "
+ t += pointToString(pt2, self._ntos)
self._commands.append(t)
self._lastCommand = "Q"
self._lastX, self._lastY = pt2
diff --git a/Lib/fontTools/pens/transformPen.py b/Lib/fontTools/pens/transformPen.py
index 2dcf83b1..93d19191 100644
--- a/Lib/fontTools/pens/transformPen.py
+++ b/Lib/fontTools/pens/transformPen.py
@@ -1,7 +1,7 @@
from fontTools.pens.filterPen import FilterPen, FilterPointPen
-__all__ = ["TransformPen"]
+__all__ = ["TransformPen", "TransformPointPen"]
class TransformPen(FilterPen):
diff --git a/Lib/fontTools/pens/ttGlyphPen.py b/Lib/fontTools/pens/ttGlyphPen.py
index e7841efc..5087e158 100644
--- a/Lib/fontTools/pens/ttGlyphPen.py
+++ b/Lib/fontTools/pens/ttGlyphPen.py
@@ -1,30 +1,31 @@
from array import array
-from fontTools.misc.fixedTools import MAX_F2DOT14, otRound, floatToFixedToFloat
+from typing import Any, Dict, Optional, Tuple
+from fontTools.misc.fixedTools import MAX_F2DOT14, floatToFixedToFloat
+from fontTools.misc.loggingTools import LogMixin
+from fontTools.pens.pointPen import AbstractPointPen
from fontTools.misc.roundTools import otRound
-from fontTools.pens.basePen import LoggingPen
-from fontTools.pens.transformPen import TransformPen
+from fontTools.pens.basePen import LoggingPen, PenError
+from fontTools.pens.transformPen import TransformPen, TransformPointPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
-__all__ = ["TTGlyphPen"]
+__all__ = ["TTGlyphPen", "TTGlyphPointPen"]
-class TTGlyphPen(LoggingPen):
- """Pen used for drawing to a TrueType glyph.
-
- This pen can be used to construct or modify glyphs in a TrueType format
- font. After using the pen to draw, use the ``.glyph()`` method to retrieve
- a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
- """
-
- def __init__(self, glyphSet, handleOverflowingTransforms=True):
- """Construct a new pen.
+class _TTGlyphBasePen:
+ def __init__(
+ self,
+ glyphSet: Optional[Dict[str, Any]],
+ handleOverflowingTransforms: bool = True,
+ ) -> None:
+ """
+ Construct a new pen.
Args:
- glyphSet (ttLib._TTGlyphSet): A glyphset object, used to resolve components.
+ glyphSet (Dict[str, Any]): A glyphset object, used to resolve components.
handleOverflowingTransforms (bool): See below.
If ``handleOverflowingTransforms`` is True, the components' transform values
@@ -42,41 +43,152 @@ class TTGlyphPen(LoggingPen):
If False, no check is done and all components are translated unmodified
into the glyf table, followed by an inevitable ``struct.error`` once an
attempt is made to compile them.
+
+ If both contours and components are present in a glyph, the components
+ are decomposed.
"""
self.glyphSet = glyphSet
self.handleOverflowingTransforms = handleOverflowingTransforms
self.init()
- def init(self):
+ def _decompose(
+ self,
+ glyphName: str,
+ transformation: Tuple[float, float, float, float, float, float],
+ ):
+ tpen = self.transformPen(self, transformation)
+ getattr(self.glyphSet[glyphName], self.drawMethod)(tpen)
+
+ def _isClosed(self):
+ """
+ Check if the current path is closed.
+ """
+ raise NotImplementedError
+
+ def init(self) -> None:
self.points = []
self.endPts = []
self.types = []
self.components = []
- def _addPoint(self, pt, onCurve):
+ def addComponent(
+ self,
+ baseGlyphName: str,
+ transformation: Tuple[float, float, float, float, float, float],
+ identifier: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+ """
+ Add a sub glyph.
+ """
+ self.components.append((baseGlyphName, transformation))
+
+ def _buildComponents(self, componentFlags):
+ if self.handleOverflowingTransforms:
+ # we can't encode transform values > 2 or < -2 in F2Dot14,
+ # so we must decompose the glyph if any transform exceeds these
+ overflowing = any(
+ s > 2 or s < -2
+ for (glyphName, transformation) in self.components
+ for s in transformation[:4]
+ )
+ components = []
+ for glyphName, transformation in self.components:
+ if glyphName not in self.glyphSet:
+ self.log.warning(f"skipped non-existing component '{glyphName}'")
+ continue
+ if self.points or (self.handleOverflowingTransforms and overflowing):
+ # can't have both coordinates and components, so decompose
+ self._decompose(glyphName, transformation)
+ continue
+
+ component = GlyphComponent()
+ component.glyphName = glyphName
+ component.x, component.y = (otRound(v) for v in transformation[4:])
+ # quantize floats to F2Dot14 so we get same values as when decompiled
+ # from a binary glyf table
+ transformation = tuple(
+ floatToFixedToFloat(v, 14) for v in transformation[:4]
+ )
+ if transformation != (1, 0, 0, 1):
+ if self.handleOverflowingTransforms and any(
+ MAX_F2DOT14 < s <= 2 for s in transformation
+ ):
+ # clamp values ~= +2.0 so we can keep the component
+ transformation = tuple(
+ MAX_F2DOT14 if MAX_F2DOT14 < s <= 2 else s
+ for s in transformation
+ )
+ component.transform = (transformation[:2], transformation[2:])
+ component.flags = componentFlags
+ components.append(component)
+ return components
+
+ def glyph(self, componentFlags: int = 0x4) -> Glyph:
+ """
+ Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
+ """
+ if not self._isClosed():
+ raise PenError("Didn't close last contour.")
+ components = self._buildComponents(componentFlags)
+
+ glyph = Glyph()
+ glyph.coordinates = GlyphCoordinates(self.points)
+ glyph.coordinates.toInt()
+ glyph.endPtsOfContours = self.endPts
+ glyph.flags = array("B", self.types)
+ self.init()
+
+ if components:
+ # If both components and contours were present, they have by now
+ # been decomposed by _buildComponents.
+ glyph.components = components
+ glyph.numberOfContours = -1
+ else:
+ glyph.numberOfContours = len(glyph.endPtsOfContours)
+ glyph.program = ttProgram.Program()
+ glyph.program.fromBytecode(b"")
+
+ return glyph
+
+
+class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
+ """
+ Pen used for drawing to a TrueType glyph.
+
+ This pen can be used to construct or modify glyphs in a TrueType format
+ font. After using the pen to draw, use the ``.glyph()`` method to retrieve
+ a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
+ """
+
+ drawMethod = "draw"
+ transformPen = TransformPen
+
+ def _addPoint(self, pt: Tuple[float, float], onCurve: int) -> None:
self.points.append(pt)
self.types.append(onCurve)
- def _popPoint(self):
+ def _popPoint(self) -> None:
self.points.pop()
self.types.pop()
- def _isClosed(self):
- return (
- (not self.points) or
- (self.endPts and self.endPts[-1] == len(self.points) - 1))
+ def _isClosed(self) -> bool:
+ return (not self.points) or (
+ self.endPts and self.endPts[-1] == len(self.points) - 1
+ )
- def lineTo(self, pt):
+ def lineTo(self, pt: Tuple[float, float]) -> None:
self._addPoint(pt, 1)
- def moveTo(self, pt):
- assert self._isClosed(), '"move"-type point must begin a new contour.'
+ def moveTo(self, pt: Tuple[float, float]) -> None:
+ if not self._isClosed():
+ raise PenError('"move"-type point must begin a new contour.')
self._addPoint(pt, 1)
- def curveTo(self, *points):
+ def curveTo(self, *points) -> None:
raise NotImplementedError
- def qCurveTo(self, *points):
+ def qCurveTo(self, *points) -> None:
assert len(points) >= 1
for pt in points[:-1]:
self._addPoint(pt, 0)
@@ -85,7 +197,7 @@ class TTGlyphPen(LoggingPen):
if points[-1] is not None:
self._addPoint(points[-1], 1)
- def closePath(self):
+ def closePath(self) -> None:
endPt = len(self.points) - 1
# ignore anchors (one-point paths)
@@ -103,72 +215,71 @@ class TTGlyphPen(LoggingPen):
self.endPts.append(endPt)
- def endPath(self):
+ def endPath(self) -> None:
# TrueType contours are always "closed"
self.closePath()
- def addComponent(self, glyphName, transformation):
- self.components.append((glyphName, transformation))
- def _buildComponents(self, componentFlags):
- if self.handleOverflowingTransforms:
- # we can't encode transform values > 2 or < -2 in F2Dot14,
- # so we must decompose the glyph if any transform exceeds these
- overflowing = any(s > 2 or s < -2
- for (glyphName, transformation) in self.components
- for s in transformation[:4])
- components = []
- for glyphName, transformation in self.components:
- if glyphName not in self.glyphSet:
- self.log.warning(
- "skipped non-existing component '%s'", glyphName
- )
- continue
- if (self.points or
- (self.handleOverflowingTransforms and overflowing)):
- # can't have both coordinates and components, so decompose
- tpen = TransformPen(self, transformation)
- self.glyphSet[glyphName].draw(tpen)
- continue
+class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
+ """
+ Point pen used for drawing to a TrueType glyph.
- component = GlyphComponent()
- component.glyphName = glyphName
- component.x, component.y = (otRound(v) for v in transformation[4:])
- # quantize floats to F2Dot14 so we get same values as when decompiled
- # from a binary glyf table
- transformation = tuple(
- floatToFixedToFloat(v, 14) for v in transformation[:4]
- )
- if transformation != (1, 0, 0, 1):
- if (self.handleOverflowingTransforms and
- any(MAX_F2DOT14 < s <= 2 for s in transformation)):
- # clamp values ~= +2.0 so we can keep the component
- transformation = tuple(MAX_F2DOT14 if MAX_F2DOT14 < s <= 2
- else s for s in transformation)
- component.transform = (transformation[:2], transformation[2:])
- component.flags = componentFlags
- components.append(component)
- return components
+ This pen can be used to construct or modify glyphs in a TrueType format
+ font. After using the pen to draw, use the ``.glyph()`` method to retrieve
+ a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
+ """
- def glyph(self, componentFlags=0x4):
- """Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph."""
- assert self._isClosed(), "Didn't close last contour."
+ drawMethod = "drawPoints"
+ transformPen = TransformPointPen
- components = self._buildComponents(componentFlags)
+ def init(self) -> None:
+ super().init()
+ self._currentContourStartIndex = None
- glyph = Glyph()
- glyph.coordinates = GlyphCoordinates(self.points)
- glyph.coordinates.toInt()
- glyph.endPtsOfContours = self.endPts
- glyph.flags = array("B", self.types)
- self.init()
+ def _isClosed(self) -> bool:
+ return self._currentContourStartIndex is None
- if components:
- glyph.components = components
- glyph.numberOfContours = -1
+ def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ Start a new sub path.
+ """
+ if not self._isClosed():
+ raise PenError("Didn't close previous contour.")
+ self._currentContourStartIndex = len(self.points)
+
+ def endPath(self) -> None:
+ """
+ End the current sub path.
+ """
+ # TrueType contours are always "closed"
+ if self._isClosed():
+ raise PenError("Contour is already closed.")
+ if self._currentContourStartIndex == len(self.points):
+ raise PenError("Tried to end an empty contour.")
+ self.endPts.append(len(self.points) - 1)
+ self._currentContourStartIndex = None
+
+ def addPoint(
+ self,
+ pt: Tuple[float, float],
+ segmentType: Optional[str] = None,
+ smooth: bool = False,
+ name: Optional[str] = None,
+ identifier: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+ """
+ Add a point to the current sub path.
+ """
+ if self._isClosed():
+ raise PenError("Can't add a point to a closed contour.")
+ if segmentType is None:
+ self.types.append(0) # offcurve
+ elif segmentType in ("qcurve", "line", "move"):
+ self.types.append(1) # oncurve
+ elif segmentType == "curve":
+ raise NotImplementedError("cubic curves are not supported")
else:
- glyph.numberOfContours = len(glyph.endPtsOfContours)
- glyph.program = ttProgram.Program()
- glyph.program.fromBytecode(b"")
+ raise AssertionError(segmentType)
- return glyph
+ self.points.append(pt)
diff --git a/Lib/fontTools/subset/__init__.py b/Lib/fontTools/subset/__init__.py
index f687b056..53b440da 100644
--- a/Lib/fontTools/subset/__init__.py
+++ b/Lib/fontTools/subset/__init__.py
@@ -8,12 +8,15 @@ from fontTools.ttLib.tables import otTables
from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.pens.basePen import NullPen
from fontTools.misc.loggingTools import Timer
+from fontTools.subset.util import _add_method, _uniq_sort
from fontTools.subset.cff import *
+from fontTools.subset.svg import *
import sys
import struct
import array
import logging
from collections import Counter, defaultdict
+from functools import reduce
from types import MethodType
__usage__ = "pyftsubset font-file [glyph...] [--option=value]..."
@@ -21,82 +24,100 @@ __usage__ = "pyftsubset font-file [glyph...] [--option=value]..."
__doc__="""\
pyftsubset -- OpenType font subsetter and optimizer
- pyftsubset is an OpenType font subsetter and optimizer, based on fontTools.
- It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff)
- font file. The subsetted glyph set is based on the specified glyphs
- or characters, and specified OpenType layout features.
-
- The tool also performs some size-reducing optimizations, aimed for using
- subset fonts as webfonts. Individual optimizations can be enabled or
- disabled, and are enabled by default when they are safe.
-
-Usage:
- """+__usage__+"""
-
- At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file,
- --text, --text-file, --unicodes, or --unicodes-file, must be specified.
-
-Arguments:
- font-file
- The input font file.
- glyph
- Specify one or more glyph identifiers to include in the subset. Must be
- PS glyph names, or the special string '*' to keep the entire glyph set.
-
-Initial glyph set specification:
- These options populate the initial glyph set. Same option can appear
- multiple times, and the results are accummulated.
- --gids=<NNN>[,<NNN>...]
- Specify comma/whitespace-separated list of glyph IDs or ranges as
- decimal numbers. For example, --gids=10-12,14 adds glyphs with
- numbers 10, 11, 12, and 14.
- --gids-file=<path>
- Like --gids but reads from a file. Anything after a '#' on any line
- is ignored as comments.
- --glyphs=<glyphname>[,<glyphname>...]
- Specify comma/whitespace-separated PS glyph names to add to the subset.
- Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc
- that are accepted on the command line. The special string '*' will keep
- the entire glyph set.
- --glyphs-file=<path>
- Like --glyphs but reads from a file. Anything after a '#' on any line
- is ignored as comments.
- --text=<text>
- Specify characters to include in the subset, as UTF-8 string.
- --text-file=<path>
- Like --text but reads from a file. Newline character are not added to
- the subset.
- --unicodes=<XXXX>[,<XXXX>...]
- Specify comma/whitespace-separated list of Unicode codepoints or
- ranges as hex numbers, optionally prefixed with 'U+', 'u', etc.
- For example, --unicodes=41-5a,61-7a adds ASCII letters, so does
- the more verbose --unicodes=U+0041-005A,U+0061-007A.
- The special strings '*' will choose all Unicode characters mapped
- by the font.
- --unicodes-file=<path>
- Like --unicodes, but reads from a file. Anything after a '#' on any
- line in the file is ignored as comments.
- --ignore-missing-glyphs
- Do not fail if some requested glyphs or gids are not available in
- the font.
- --no-ignore-missing-glyphs
- Stop and fail if some requested glyphs or gids are not available
- in the font. [default]
- --ignore-missing-unicodes [default]
- Do not fail if some requested Unicode characters (including those
- indirectly specified using --text or --text-file) are not available
- in the font.
- --no-ignore-missing-unicodes
- Stop and fail if some requested Unicode characters are not available
- in the font.
- Note the default discrepancy between ignoring missing glyphs versus
- unicodes. This is for historical reasons and in the future
- --no-ignore-missing-unicodes might become default.
-
-Other options:
- For the other options listed below, to see the current value of the option,
- pass a value of '?' to it, with or without a '='.
- Examples:
+pyftsubset is an OpenType font subsetter and optimizer, based on fontTools.
+It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff)
+font file. The subsetted glyph set is based on the specified glyphs
+or characters, and specified OpenType layout features.
+
+The tool also performs some size-reducing optimizations, aimed for using
+subset fonts as webfonts. Individual optimizations can be enabled or
+disabled, and are enabled by default when they are safe.
+
+Usage: """+__usage__+"""
+
+At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file,
+--text, --text-file, --unicodes, or --unicodes-file, must be specified.
+
+Args:
+
+font-file
+ The input font file.
+glyph
+ Specify one or more glyph identifiers to include in the subset. Must be
+ PS glyph names, or the special string '*' to keep the entire glyph set.
+
+Initial glyph set specification
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These options populate the initial glyph set. Same option can appear
+multiple times, and the results are accumulated.
+
+--gids=<NNN>[,<NNN>...]
+ Specify comma/whitespace-separated list of glyph IDs or ranges as decimal
+ numbers. For example, --gids=10-12,14 adds glyphs with numbers 10, 11,
+ 12, and 14.
+
+--gids-file=<path>
+ Like --gids but reads from a file. Anything after a '#' on any line is
+ ignored as comments.
+
+--glyphs=<glyphname>[,<glyphname>...]
+ Specify comma/whitespace-separated PS glyph names to add to the subset.
+ Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc
+ that are accepted on the command line. The special string '*' will keep
+ the entire glyph set.
+
+--glyphs-file=<path>
+ Like --glyphs but reads from a file. Anything after a '#' on any line
+ is ignored as comments.
+
+--text=<text>
+ Specify characters to include in the subset, as UTF-8 string.
+
+--text-file=<path>
+  Like --text but reads from a file. Newline characters are not added to
+ the subset.
+
+--unicodes=<XXXX>[,<XXXX>...]
+ Specify comma/whitespace-separated list of Unicode codepoints or
+ ranges as hex numbers, optionally prefixed with 'U+', 'u', etc.
+ For example, --unicodes=41-5a,61-7a adds ASCII letters, so does
+ the more verbose --unicodes=U+0041-005A,U+0061-007A.
+  The special string '*' will choose all Unicode characters mapped
+ by the font.
+
+--unicodes-file=<path>
+ Like --unicodes, but reads from a file. Anything after a '#' on any
+ line in the file is ignored as comments.
+
+--ignore-missing-glyphs
+ Do not fail if some requested glyphs or gids are not available in
+ the font.
+
+--no-ignore-missing-glyphs
+ Stop and fail if some requested glyphs or gids are not available
+ in the font. [default]
+
+--ignore-missing-unicodes [default]
+ Do not fail if some requested Unicode characters (including those
+ indirectly specified using --text or --text-file) are not available
+ in the font.
+
+--no-ignore-missing-unicodes
+ Stop and fail if some requested Unicode characters are not available
+ in the font.
+ Note the default discrepancy between ignoring missing glyphs versus
+ unicodes. This is for historical reasons and in the future
+ --no-ignore-missing-unicodes might become default.
+
+Other options
+^^^^^^^^^^^^^
+
+For the other options listed below, to see the current value of the option,
+pass a value of '?' to it, with or without a '='.
+
+Examples::
+
$ pyftsubset --glyph-names?
Current setting for 'glyph-names' is: False
$ ./pyftsubset --name-IDs=?
@@ -105,239 +126,299 @@ Other options:
Current setting for 'hinting' is: True
Current setting for 'hinting' is: False
-Output options:
- --output-file=<path>
- The output font file. If not specified, the subsetted font
- will be saved in as font-file.subset.
- --flavor=<type>
- Specify flavor of output font file. May be 'woff' or 'woff2'.
- Note that WOFF2 requires the Brotli Python extension, available
- at https://github.com/google/brotli
- --with-zopfli
- Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 %
- smaller than pure zlib, but the compression speed is much slower.
- The Zopfli Python bindings are available at:
- https://pypi.python.org/pypi/zopfli
-
-Glyph set expansion:
- These options control how additional glyphs are added to the subset.
- --retain-gids
- Retain glyph indices; just empty glyphs not needed in-place.
- --notdef-glyph
- Add the '.notdef' glyph to the subset (ie, keep it). [default]
- --no-notdef-glyph
- Drop the '.notdef' glyph unless specified in the glyph set. This
- saves a few bytes, but is not possible for Postscript-flavored
- fonts, as those require '.notdef'. For TrueType-flavored fonts,
- this works fine as long as no unsupported glyphs are requested
- from the font.
- --notdef-outline
- Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is
- used when glyphs not supported by the font are to be shown. It is not
- needed otherwise.
- --no-notdef-outline
- When including a '.notdef' glyph, remove its outline. This saves
- a few bytes. [default]
- --recommended-glyphs
- Add glyphs 0, 1, 2, and 3 to the subset, as recommended for
- TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'.
- Some legacy software might require this, but no modern system does.
- --no-recommended-glyphs
- Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in
- glyph set. [default]
- --no-layout-closure
- Do not expand glyph set to add glyphs produced by OpenType layout
- features. Instead, OpenType layout features will be subset to only
- rules that are relevant to the otherwise-specified glyph set.
- --layout-features[+|-]=<feature>[,<feature>...]
- Specify (=), add to (+=) or exclude from (-=) the comma-separated
- set of OpenType layout feature tags that will be preserved.
- Glyph variants used by the preserved features are added to the
- specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs',
- 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt',
- 'rlig', 'rvrn', and all features required for script shaping are
- preserved. To see the full list, try '--layout-features=?'.
- Use '*' to keep all features.
- Multiple --layout-features options can be provided if necessary.
- Examples:
- --layout-features+=onum,pnum,ss01
- * Keep the default set of features and 'onum', 'pnum', 'ss01'.
- --layout-features-='mark','mkmk'
- * Keep the default set of features but drop 'mark' and 'mkmk'.
- --layout-features='kern'
- * Only keep the 'kern' feature, drop all others.
- --layout-features=''
- * Drop all features.
- --layout-features='*'
- * Keep all features.
- --layout-features+=aalt --layout-features-=vrt2
- * Keep default set of features plus 'aalt', but drop 'vrt2'.
- --layout-scripts[+|-]=<script>[,<script>...]
- Specify (=), add to (+=) or exclude from (-=) the comma-separated
- set of OpenType layout script tags that will be preserved. LangSys tags
- can be appended to script tag, separated by '.', for example:
- 'arab.dflt,arab.URD,latn.TRK'. By default all scripts are retained ('*').
-
-Hinting options:
- --hinting
- Keep hinting [default]
- --no-hinting
- Drop glyph-specific hinting and font-wide hinting tables, as well
- as remove hinting-related bits and pieces from other tables (eg. GPOS).
- See --hinting-tables for list of tables that are dropped by default.
- Instructions and hints are stripped from 'glyf' and 'CFF ' tables
- respectively. This produces (sometimes up to 30%) smaller fonts that
- are suitable for extremely high-resolution systems, like high-end
- mobile devices and retina displays.
-
-Optimization options:
- --desubroutinize
- Remove CFF use of subroutinizes. Subroutinization is a way to make CFF
- fonts smaller. For small subsets however, desubroutinizing might make
- the font smaller. It has even been reported that desubroutinized CFF
- fonts compress better (produce smaller output) WOFF and WOFF2 fonts.
- Also see note under --no-hinting.
- --no-desubroutinize [default]
- Leave CFF subroutinizes as is, only throw away unused subroutinizes.
-
-Font table options:
- --drop-tables[+|-]=<table>[,<table>...]
- Specify (=), add to (+=) or exclude from (-=) the comma-separated
- set of tables that will be be dropped.
- By default, the following tables are dropped:
- 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH'
- and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill'.
- The tool will attempt to subset the remaining tables.
- Examples:
- --drop-tables-='SVG '
- * Drop the default set of tables but keep 'SVG '.
- --drop-tables+=GSUB
- * Drop the default set of tables and 'GSUB'.
- --drop-tables=DSIG
- * Only drop the 'DSIG' table, keep all others.
- --drop-tables=
- * Keep all tables.
- --no-subset-tables+=<table>[,<table>...]
- Add to the set of tables that will not be subsetted.
- By default, the following tables are included in this list, as
- they do not need subsetting (ignore the fact that 'loca' is listed
- here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', 'name',
- 'cvt ', 'fpgm', 'prep', 'VMDX', 'DSIG', 'CPAL', 'MVAR', 'cvar', 'STAT'.
- By default, tables that the tool does not know how to subset and are not
- specified here will be dropped from the font, unless --passthrough-tables
- option is passed.
- Example:
- --no-subset-tables+=FFTM
- * Keep 'FFTM' table in the font by preventing subsetting.
- --passthrough-tables
- Do not drop tables that the tool does not know how to subset.
- --no-passthrough-tables
- Tables that the tool does not know how to subset and are not specified
- in --no-subset-tables will be dropped from the font. [default]
- --hinting-tables[-]=<table>[,<table>...]
- Specify (=), add to (+=) or exclude from (-=) the list of font-wide
- hinting tables that will be dropped if --no-hinting is specified,
- Examples:
- --hinting-tables-='VDMX'
- * Drop font-wide hinting tables except 'VDMX'.
- --hinting-tables=''
- * Keep all font-wide hinting tables (but strip hints from glyphs).
- --legacy-kern
- Keep TrueType 'kern' table even when OpenType 'GPOS' is available.
- --no-legacy-kern
- Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default]
-
-Font naming options:
- These options control what is retained in the 'name' table. For numerical
- codes, see: http://www.microsoft.com/typography/otspec/name.htm
- --name-IDs[+|-]=<nameID>[,<nameID>...]
- Specify (=), add to (+=) or exclude from (-=) the set of 'name' table
- entry nameIDs that will be preserved. By default, only nameIDs between 0
- and 6 are preserved, the rest are dropped. Use '*' to keep all entries.
- Examples:
- --name-IDs+=7,8,9
- * Also keep Trademark, Manufacturer and Designer name entries.
- --name-IDs=''
- * Drop all 'name' table entries.
- --name-IDs='*'
- * keep all 'name' table entries
- --name-legacy
- Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.).
- XXX Note: This might be needed for some fonts that have no Unicode name
- entires for English. See: https://github.com/fonttools/fonttools/issues/146
- --no-name-legacy
- Drop legacy (non-Unicode) 'name' table entries [default]
- --name-languages[+|-]=<langID>[,<langID>]
- Specify (=), add to (+=) or exclude from (-=) the set of 'name' table
- langIDs that will be preserved. By default only records with langID
- 0x0409 (English) are preserved. Use '*' to keep all langIDs.
- --obfuscate-names
- Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4,
- and 6 with dummy strings (it is still fully functional as webfont).
-
-Glyph naming and encoding options:
- --glyph-names
- Keep PS glyph names in TT-flavored fonts. In general glyph names are
- not needed for correct use of the font. However, some PDF generators
- and PDF viewers might rely on glyph names to extract Unicode text
- from PDF documents.
- --no-glyph-names
- Drop PS glyph names in TT-flavored fonts, by using 'post' table
- version 3.0. [default]
- --legacy-cmap
- Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.).
- --no-legacy-cmap
- Drop the legacy 'cmap' subtables. [default]
- --symbol-cmap
- Keep the 3.0 symbol 'cmap'.
- --no-symbol-cmap
- Drop the 3.0 symbol 'cmap'. [default]
-
-Other font-specific options:
- --recalc-bounds
- Recalculate font bounding boxes.
- --no-recalc-bounds
- Keep original font bounding boxes. This is faster and still safe
- for all practical purposes. [default]
- --recalc-timestamp
- Set font 'modified' timestamp to current time.
- --no-recalc-timestamp
- Do not modify font 'modified' timestamp. [default]
- --canonical-order
- Order tables as recommended in the OpenType standard. This is not
- required by the standard, nor by any known implementation.
- --no-canonical-order
- Keep original order of font tables. This is faster. [default]
- --prune-unicode-ranges
- Update the 'OS/2 ulUnicodeRange*' bits after subsetting. The Unicode
- ranges defined in the OpenType specification v1.7 are intersected with
- the Unicode codepoints specified in the font's Unicode 'cmap' subtables:
- when no overlap is found, the bit will be switched off. However, it will
- *not* be switched on if an intersection is found. [default]
- --no-prune-unicode-ranges
- Don't change the 'OS/2 ulUnicodeRange*' bits.
- --recalc-average-width
- Update the 'OS/2 xAvgCharWidth' field after subsetting.
- --no-recalc-average-width
- Don't change the 'OS/2 xAvgCharWidth' field. [default]
- --recalc-max-context
- Update the 'OS/2 usMaxContext' field after subsetting.
- --no-recalc-max-context
- Don't change the 'OS/2 usMaxContext' field. [default]
- --font-number=<number>
- Select font number for TrueType Collection (.ttc/.otc), starting from 0.
-
-Application options:
- --verbose
- Display verbose information of the subsetting process.
- --timing
- Display detailed timing information of the subsetting process.
- --xml
- Display the TTX XML representation of subsetted font.
-
-Example:
- Produce a subset containing the characters ' !"#$%' without performing
- size-reducing optimizations:
+Output options
+^^^^^^^^^^^^^^
+
+--output-file=<path>
+ The output font file. If not specified, the subsetted font
+ will be saved in as font-file.subset.
+
+--flavor=<type>
+ Specify flavor of output font file. May be 'woff' or 'woff2'.
+ Note that WOFF2 requires the Brotli Python extension, available
+ at https://github.com/google/brotli
+
+--with-zopfli
+ Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 %
+ smaller than pure zlib, but the compression speed is much slower.
+ The Zopfli Python bindings are available at:
+ https://pypi.python.org/pypi/zopfli
+
+Glyph set expansion
+^^^^^^^^^^^^^^^^^^^
+
+These options control how additional glyphs are added to the subset.
+
+--retain-gids
+ Retain glyph indices; just empty glyphs not needed in-place.
+
+--notdef-glyph
+ Add the '.notdef' glyph to the subset (ie, keep it). [default]
+
+--no-notdef-glyph
+ Drop the '.notdef' glyph unless specified in the glyph set. This
+ saves a few bytes, but is not possible for Postscript-flavored
+ fonts, as those require '.notdef'. For TrueType-flavored fonts,
+ this works fine as long as no unsupported glyphs are requested
+ from the font.
+
+--notdef-outline
+ Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is
+ used when glyphs not supported by the font are to be shown. It is not
+ needed otherwise.
+
+--no-notdef-outline
+ When including a '.notdef' glyph, remove its outline. This saves
+ a few bytes. [default]
+
+--recommended-glyphs
+ Add glyphs 0, 1, 2, and 3 to the subset, as recommended for
+ TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'.
+ Some legacy software might require this, but no modern system does.
+
+--no-recommended-glyphs
+ Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in
+ glyph set. [default]
+
+--no-layout-closure
+ Do not expand glyph set to add glyphs produced by OpenType layout
+ features. Instead, OpenType layout features will be subset to only
+ rules that are relevant to the otherwise-specified glyph set.
+
+--layout-features[+|-]=<feature>[,<feature>...]
+ Specify (=), add to (+=) or exclude from (-=) the comma-separated
+ set of OpenType layout feature tags that will be preserved.
+ Glyph variants used by the preserved features are added to the
+ specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs',
+ 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt',
+ 'rlig', 'rvrn', and all features required for script shaping are
+ preserved. To see the full list, try '--layout-features=?'.
+ Use '*' to keep all features.
+ Multiple --layout-features options can be provided if necessary.
+ Examples:
+
+ --layout-features+=onum,pnum,ss01
+ * Keep the default set of features and 'onum', 'pnum', 'ss01'.
+ --layout-features-='mark','mkmk'
+ * Keep the default set of features but drop 'mark' and 'mkmk'.
+ --layout-features='kern'
+ * Only keep the 'kern' feature, drop all others.
+ --layout-features=''
+ * Drop all features.
+ --layout-features='*'
+ * Keep all features.
+ --layout-features+=aalt --layout-features-=vrt2
+ * Keep default set of features plus 'aalt', but drop 'vrt2'.
+
+--layout-scripts[+|-]=<script>[,<script>...]
+ Specify (=), add to (+=) or exclude from (-=) the comma-separated
+ set of OpenType layout script tags that will be preserved. LangSys tags
+ can be appended to script tag, separated by '.', for example:
+ 'arab.dflt,arab.URD,latn.TRK'. By default all scripts are retained ('*').
+
+Hinting options
+^^^^^^^^^^^^^^^
+
+--hinting
+ Keep hinting [default]
+
+--no-hinting
+ Drop glyph-specific hinting and font-wide hinting tables, as well
+ as remove hinting-related bits and pieces from other tables (eg. GPOS).
+ See --hinting-tables for list of tables that are dropped by default.
+ Instructions and hints are stripped from 'glyf' and 'CFF ' tables
+ respectively. This produces (sometimes up to 30%) smaller fonts that
+ are suitable for extremely high-resolution systems, like high-end
+ mobile devices and retina displays.
+
+Optimization options
+^^^^^^^^^^^^^^^^^^^^
+
+--desubroutinize
+  Remove CFF use of subroutines. Subroutinization is a way to make CFF
+ fonts smaller. For small subsets however, desubroutinizing might make
+ the font smaller. It has even been reported that desubroutinized CFF
+ fonts compress better (produce smaller output) WOFF and WOFF2 fonts.
+ Also see note under --no-hinting.
+
+--no-desubroutinize [default]
+  Leave CFF subroutines as is, only throw away unused subroutines.
+
+Font table options
+^^^^^^^^^^^^^^^^^^
+
+--drop-tables[+|-]=<table>[,<table>...]
+ Specify (=), add to (+=) or exclude from (-=) the comma-separated
+  set of tables that will be dropped.
+ By default, the following tables are dropped:
+ 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'PCLT', 'LTSH'
+ and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill'.
+ The tool will attempt to subset the remaining tables.
+
+ Examples:
+
+ --drop-tables-='BASE'
+ * Drop the default set of tables but keep 'BASE'.
+
+ --drop-tables+=GSUB
+ * Drop the default set of tables and 'GSUB'.
+
+ --drop-tables=DSIG
+ * Only drop the 'DSIG' table, keep all others.
+
+ --drop-tables=
+ * Keep all tables.
+
+--no-subset-tables+=<table>[,<table>...]
+ Add to the set of tables that will not be subsetted.
+ By default, the following tables are included in this list, as
+ they do not need subsetting (ignore the fact that 'loca' is listed
+ here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', 'name',
+  'cvt ', 'fpgm', 'prep', 'VDMX', 'DSIG', 'CPAL', 'MVAR', 'cvar', 'STAT'.
+ By default, tables that the tool does not know how to subset and are not
+ specified here will be dropped from the font, unless --passthrough-tables
+ option is passed.
+
+ Example:
+
+ --no-subset-tables+=FFTM
+ * Keep 'FFTM' table in the font by preventing subsetting.
+
+--passthrough-tables
+ Do not drop tables that the tool does not know how to subset.
+
+--no-passthrough-tables
+ Tables that the tool does not know how to subset and are not specified
+ in --no-subset-tables will be dropped from the font. [default]
+
+--hinting-tables[-]=<table>[,<table>...]
+ Specify (=), add to (+=) or exclude from (-=) the list of font-wide
+ hinting tables that will be dropped if --no-hinting is specified.
+
+ Examples:
+
+ --hinting-tables-='VDMX'
+ * Drop font-wide hinting tables except 'VDMX'.
+ --hinting-tables=''
+ * Keep all font-wide hinting tables (but strip hints from glyphs).
+
+--legacy-kern
+ Keep TrueType 'kern' table even when OpenType 'GPOS' is available.
+
+--no-legacy-kern
+ Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default]
+
+Font naming options
+^^^^^^^^^^^^^^^^^^^
+
+These options control what is retained in the 'name' table. For numerical
+codes, see: http://www.microsoft.com/typography/otspec/name.htm
+
+--name-IDs[+|-]=<nameID>[,<nameID>...]
+ Specify (=), add to (+=) or exclude from (-=) the set of 'name' table
+ entry nameIDs that will be preserved. By default, only nameIDs between 0
+ and 6 are preserved, the rest are dropped. Use '*' to keep all entries.
+
+ Examples:
+
+ --name-IDs+=7,8,9
+ * Also keep Trademark, Manufacturer and Designer name entries.
+ --name-IDs=''
+ * Drop all 'name' table entries.
+ --name-IDs='*'
+      * Keep all 'name' table entries.
+
+--name-legacy
+ Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.).
+ XXX Note: This might be needed for some fonts that have no Unicode name
+  entries for English. See: https://github.com/fonttools/fonttools/issues/146
+
+--no-name-legacy
+ Drop legacy (non-Unicode) 'name' table entries [default]
+
+--name-languages[+|-]=<langID>[,<langID>]
+ Specify (=), add to (+=) or exclude from (-=) the set of 'name' table
+ langIDs that will be preserved. By default only records with langID
+ 0x0409 (English) are preserved. Use '*' to keep all langIDs.
+
+--obfuscate-names
+ Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4,
+ and 6 with dummy strings (it is still fully functional as webfont).
+
+Glyph naming and encoding options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+--glyph-names
+ Keep PS glyph names in TT-flavored fonts. In general glyph names are
+ not needed for correct use of the font. However, some PDF generators
+ and PDF viewers might rely on glyph names to extract Unicode text
+ from PDF documents.
+--no-glyph-names
+ Drop PS glyph names in TT-flavored fonts, by using 'post' table
+ version 3.0. [default]
+--legacy-cmap
+ Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.).
+--no-legacy-cmap
+ Drop the legacy 'cmap' subtables. [default]
+--symbol-cmap
+ Keep the 3.0 symbol 'cmap'.
+--no-symbol-cmap
+ Drop the 3.0 symbol 'cmap'. [default]
+
+Other font-specific options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+--recalc-bounds
+ Recalculate font bounding boxes.
+--no-recalc-bounds
+ Keep original font bounding boxes. This is faster and still safe
+ for all practical purposes. [default]
+--recalc-timestamp
+ Set font 'modified' timestamp to current time.
+--no-recalc-timestamp
+ Do not modify font 'modified' timestamp. [default]
+--canonical-order
+ Order tables as recommended in the OpenType standard. This is not
+ required by the standard, nor by any known implementation.
+--no-canonical-order
+ Keep original order of font tables. This is faster. [default]
+--prune-unicode-ranges
+ Update the 'OS/2 ulUnicodeRange*' bits after subsetting. The Unicode
+ ranges defined in the OpenType specification v1.7 are intersected with
+ the Unicode codepoints specified in the font's Unicode 'cmap' subtables:
+ when no overlap is found, the bit will be switched off. However, it will
+ *not* be switched on if an intersection is found. [default]
+--no-prune-unicode-ranges
+ Don't change the 'OS/2 ulUnicodeRange*' bits.
+--recalc-average-width
+ Update the 'OS/2 xAvgCharWidth' field after subsetting.
+--no-recalc-average-width
+ Don't change the 'OS/2 xAvgCharWidth' field. [default]
+--recalc-max-context
+ Update the 'OS/2 usMaxContext' field after subsetting.
+--no-recalc-max-context
+ Don't change the 'OS/2 usMaxContext' field. [default]
+--font-number=<number>
+ Select font number for TrueType Collection (.ttc/.otc), starting from 0.
+--pretty-svg
+ When subsetting SVG table, use lxml pretty_print=True option to indent
+ the XML output (only recommended for debugging purposes).
+
+Application options
+^^^^^^^^^^^^^^^^^^^
+
+--verbose
+ Display verbose information of the subsetting process.
+--timing
+ Display detailed timing information of the subsetting process.
+--xml
+ Display the TTX XML representation of subsetted font.
+
+Example
+^^^^^^^
+
+Produce a subset containing the characters ' !"#$%' without performing
+size-reducing optimizations::
$ pyftsubset font.ttf --unicodes="U+0020-0025" \\
--layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\
@@ -362,26 +443,6 @@ log.glyphs = MethodType(_log_glyphs, log)
timer = Timer(logger=logging.getLogger("fontTools.subset.timer"))
-def _add_method(*clazzes):
- """Returns a decorator function that adds a new method to one or
- more classes."""
- def wrapper(method):
- done = []
- for clazz in clazzes:
- if clazz in done: continue # Support multiple names of a clazz
- done.append(clazz)
- assert clazz.__name__ != 'DefaultTable', \
- 'Oops, table class not found.'
- assert not hasattr(clazz, method.__name__), \
- "Oops, class '%s' has method '%s'." % (clazz.__name__,
- method.__name__)
- setattr(clazz, method.__name__, method)
- return None
- return wrapper
-
-def _uniq_sort(l):
- return sorted(set(l))
-
def _dict_subset(d, glyphs):
return {g:d[g] for g in glyphs}
@@ -527,6 +588,17 @@ def subset_glyphs(self, s):
else:
assert 0, "unknown format: %s" % self.Format
+@_add_method(otTables.Device)
+def is_hinting(self):
+ return self.DeltaFormat in (1,2,3)
+
+@_add_method(otTables.ValueRecord)
+def prune_hints(self):
+ for name in ['XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice']:
+ v = getattr(self, name, None)
+ if v is not None and v.is_hinting():
+ delattr(self, name)
+
@_add_method(otTables.SinglePos)
def subset_glyphs(self, s):
if self.Format == 1:
@@ -543,14 +615,27 @@ def subset_glyphs(self, s):
@_add_method(otTables.SinglePos)
def prune_post_subset(self, font, options):
- if not options.hinting:
- # Drop device tables
- self.ValueFormat &= ~0x00F0
+ if self.Value is None:
+ assert self.ValueFormat == 0
+ return True
+
+ # Shrink ValueFormat
+ if self.Format == 1:
+ if not options.hinting:
+ self.Value.prune_hints()
+ self.ValueFormat = self.Value.getEffectiveFormat()
+ elif self.Format == 2:
+ if not options.hinting:
+ for v in self.Value:
+ v.prune_hints()
+ self.ValueFormat = reduce(int.__or__, [v.getEffectiveFormat() for v in self.Value], 0)
+
# Downgrade to Format 1 if all ValueRecords are the same
if self.Format == 2 and all(v == self.Value[0] for v in self.Value):
self.Format = 1
self.Value = self.Value[0] if self.ValueFormat != 0 else None
del self.ValueCount
+
return True
@_add_method(otTables.PairPos)
@@ -587,10 +672,22 @@ def subset_glyphs(self, s):
@_add_method(otTables.PairPos)
def prune_post_subset(self, font, options):
if not options.hinting:
- # Drop device tables
- self.ValueFormat1 &= ~0x00F0
- self.ValueFormat2 &= ~0x00F0
- return True
+ attr1, attr2 = {
+ 1: ('PairSet', 'PairValueRecord'),
+ 2: ('Class1Record', 'Class2Record'),
+ }[self.Format]
+
+ self.ValueFormat1 = self.ValueFormat2 = 0
+ for row in getattr(self, attr1):
+ for r in getattr(row, attr2):
+ if r.Value1:
+ r.Value1.prune_hints()
+ self.ValueFormat1 |= r.Value1.getEffectiveFormat()
+ if r.Value2:
+ r.Value2.prune_hints()
+ self.ValueFormat2 |= r.Value2.getEffectiveFormat()
+
+ return bool(self.ValueFormat1 | self.ValueFormat2)
@_add_method(otTables.CursivePos)
def subset_glyphs(self, s):
@@ -606,9 +703,15 @@ def subset_glyphs(self, s):
@_add_method(otTables.Anchor)
def prune_hints(self):
- # Drop device tables / contour anchor point
- self.ensureDecompiled()
- self.Format = 1
+ if self.Format == 2:
+ self.Format = 1
+ elif self.Format == 3:
+ for name in ('XDeviceTable', 'YDeviceTable'):
+ v = getattr(self, name, None)
+ if v is not None and v.is_hinting():
+ setattr(self, name, None)
+ if self.XDeviceTable is None and self.YDeviceTable is None:
+ self.Format = 1
@_add_method(otTables.CursivePos)
def prune_post_subset(self, font, options):
@@ -713,7 +816,6 @@ def subset_glyphs(self, s):
@_add_method(otTables.MarkMarkPos)
def prune_post_subset(self, font, options):
if not options.hinting:
- # Drop device tables or contour anchor point
for m in self.Mark1Array.MarkRecord:
if m.MarkAnchor:
m.MarkAnchor.prune_hints()
@@ -976,7 +1078,7 @@ def closure_glyphs(self, s, cur_glyphs):
chaos.update(range(seqi, len(getattr(r, c.Input))+2))
lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
elif self.Format == 3:
- if not all(x.intersect(s.glyphs) for x in c.RuleData(self)):
+ if not all(x is not None and x.intersect(s.glyphs) for x in c.RuleData(self)):
return []
r = self
input_coverages = getattr(r, c.Input)
@@ -1071,7 +1173,7 @@ def subset_glyphs(self, s):
return bool(rss)
elif self.Format == 3:
- return all(x.subset(s.glyphs) for x in c.RuleData(self))
+ return all(x is not None and x.subset(s.glyphs) for x in c.RuleData(self))
else:
assert 0, "unknown format: %s" % self.Format
@@ -1266,7 +1368,13 @@ def subset_lookups(self, lookup_indices):
self.LookupListIndex = [lookup_indices.index(l)
for l in self.LookupListIndex]
self.LookupCount = len(self.LookupListIndex)
- return self.LookupCount or self.FeatureParams
+ # keep 'size' feature even if it contains no lookups; but drop any other
+ # empty feature (e.g. FeatureParams for stylistic set names)
+ # https://github.com/fonttools/fonttools/issues/2324
+ return (
+ self.LookupCount or
+ isinstance(self.FeatureParams, otTables.FeatureParamsSize)
+ )
@_add_method(otTables.FeatureList)
def subset_lookups(self, lookup_indices):
@@ -2007,7 +2115,7 @@ def closure_glyphs(self, s):
self.ColorLayers = self._decompileColorLayersV0(self.table)
self.ColorLayersV1 = {
rec.BaseGlyph: rec.Paint
- for rec in self.table.BaseGlyphV1List.BaseGlyphV1Record
+ for rec in self.table.BaseGlyphList.BaseGlyphPaintRecord
}
decompose = s.glyphs
@@ -2031,31 +2139,48 @@ def subset_glyphs(self, s):
from fontTools.colorLib.unbuilder import unbuildColrV1
from fontTools.colorLib.builder import buildColrV1, populateCOLRv0
+ # only include glyphs after COLR closure, which in turn comes after cmap and GSUB
+ # closure, but importantly before glyf/CFF closures. COLR layers can refer to
+ # composite glyphs, and that's ok, since glyf/CFF closures happen after COLR closure
+ # and take care of those. If we also included glyphs resulting from glyf/CFF closures
+ # when deciding which COLR base glyphs to retain, then we may end up with a situation
+ # whereby a COLR base glyph is kept, not because directly requested (cmap)
+ # or substituted (GSUB) or referenced by another COLRv1 PaintColrGlyph, but because
+ # it corresponds to (has same GID as) a non-COLR glyph that happens to be used as a
+ # component in glyf or CFF table. Best case scenario we retain more glyphs than
+ # required; worst case we retain incomplete COLR records that try to reference
+ # glyphs that are no longer in the final subset font.
+ # https://github.com/fonttools/fonttools/issues/2461
+ s.glyphs = s.glyphs_colred
+
self.ColorLayers = {g: self.ColorLayers[g] for g in s.glyphs if g in self.ColorLayers}
if self.version == 0:
return bool(self.ColorLayers)
- colorGlyphsV1 = unbuildColrV1(self.table.LayerV1List, self.table.BaseGlyphV1List)
- self.table.LayerV1List, self.table.BaseGlyphV1List = buildColrV1(
+ colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)
+ self.table.LayerList, self.table.BaseGlyphList = buildColrV1(
{g: colorGlyphsV1[g] for g in colorGlyphsV1 if g in s.glyphs}
)
del self.ColorLayersV1
+ if self.table.ClipList is not None:
+ clips = self.table.ClipList.clips
+ self.table.ClipList.clips = {g: clips[g] for g in clips if g in s.glyphs}
+
layersV0 = self.ColorLayers
- if not self.table.BaseGlyphV1List.BaseGlyphV1Record:
+ if not self.table.BaseGlyphList.BaseGlyphPaintRecord:
# no more COLRv1 glyphs: downgrade to version 0
self.version = 0
del self.table
return bool(layersV0)
- if layersV0:
- populateCOLRv0(
- self.table,
- {
- g: [(layer.name, layer.colorID) for layer in layersV0[g]]
- for g in layersV0
- },
- )
+ populateCOLRv0(
+ self.table,
+ {
+ g: [(layer.name, layer.colorID) for layer in layersV0[g]]
+ for g in layersV0
+ },
+ )
del self.ColorLayers
# TODO: also prune ununsed varIndices in COLR.VarStore
@@ -2070,11 +2195,11 @@ def prune_post_subset(self, font, options):
colors_by_index = defaultdict(list)
def collect_colors_by_index(paint):
- if hasattr(paint, "Color"): # either solid colors...
- colors_by_index[paint.Color.PaletteIndex].append(paint.Color)
+ if hasattr(paint, "PaletteIndex"): # either solid colors...
+ colors_by_index[paint.PaletteIndex].append(paint)
elif hasattr(paint, "ColorLine"): # ... or gradient color stops
for stop in paint.ColorLine.ColorStop:
- colors_by_index[stop.Color.PaletteIndex].append(stop.Color)
+ colors_by_index[stop.PaletteIndex].append(stop)
if colr.version == 0:
for layers in colr.ColorLayers.values():
@@ -2084,10 +2209,12 @@ def prune_post_subset(self, font, options):
if colr.table.LayerRecordArray:
for layer in colr.table.LayerRecordArray.LayerRecord:
colors_by_index[layer.PaletteIndex].append(layer)
- for record in colr.table.BaseGlyphV1List.BaseGlyphV1Record:
+ for record in colr.table.BaseGlyphList.BaseGlyphPaintRecord:
record.Paint.traverse(colr.table, collect_colors_by_index)
- retained_palette_indices = set(colors_by_index.keys())
+ # don't remap palette entry index 0xFFFF, this is always the foreground color
+ # https://github.com/fonttools/fonttools/issues/2257
+ retained_palette_indices = set(colors_by_index.keys()) - {0xFFFF}
for palette in self.palettes:
palette[:] = [c for i, c in enumerate(palette) if i in retained_palette_indices]
assert len(palette) == len(retained_palette_indices)
@@ -2201,7 +2328,7 @@ def subset_glyphs(self, s):
def remapComponentsFast(self, glyphidmap):
if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
return # Not composite
- data = array.array("B", self.data)
+ data = self.data = bytearray(self.data)
i = 10
more = 1
while more:
@@ -2221,8 +2348,6 @@ def remapComponentsFast(self, glyphidmap):
elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO
more = flags & 0x0020 # MORE_COMPONENTS
- self.data = data.tobytes()
-
@_add_method(ttLib.getTableClass('glyf'))
def closure_glyphs(self, s):
glyphSet = self.glyphs
@@ -2245,7 +2370,7 @@ def prune_pre_subset(self, font, options):
g = self[self.glyphOrder[0]]
# Yay, easy!
g.__dict__.clear()
- g.data = ""
+ g.data = b''
return True
@_add_method(ttLib.getTableClass('glyf'))
@@ -2260,7 +2385,7 @@ def subset_glyphs(self, s):
Glyph = ttLib.getTableModule('glyf').Glyph
for g in s.glyphs_emptied:
self.glyphs[g] = Glyph()
- self.glyphs[g].data = ''
+ self.glyphs[g].data = b''
self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs or g in s.glyphs_emptied]
# Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
return True
@@ -2454,7 +2579,7 @@ class Options(object):
# spaces in tag names (e.g. "SVG ", "cvt ") are stripped by the argument parser
_drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC',
- 'EBSC', 'SVG', 'PCLT', 'LTSH']
+ 'EBSC', 'PCLT', 'LTSH']
_drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite
_no_subset_tables_default = ['avar', 'fvar',
'gasp', 'head', 'hhea', 'maxp',
@@ -2521,6 +2646,7 @@ class Options(object):
self.timing = False
self.xml = False
self.font_number = -1
+ self.pretty_svg = False
self.set(**kwargs)
@@ -2659,7 +2785,7 @@ class Subsetter(object):
def _closure_glyphs(self, font):
realGlyphs = set(font.getGlyphOrder())
- glyph_order = font.getGlyphOrder()
+ self.orig_glyph_order = glyph_order = font.getGlyphOrder()
self.glyphs_requested = set()
self.glyphs_requested.update(self.glyph_names_requested)
@@ -2739,6 +2865,7 @@ class Subsetter(object):
log.info("Closed glyph list over '%s': %d glyphs after",
table, len(self.glyphs))
log.glyphs(self.glyphs, font=font)
+ setattr(self, f"glyphs_{table.lower()}ed", frozenset(self.glyphs))
if 'glyf' in font:
with timer("close glyph list over 'glyf'"):
@@ -2778,6 +2905,24 @@ class Subsetter(object):
self.reverseEmptiedGlyphMap = {g:order[g] for g in self.glyphs_emptied}
+ if not self.options.retain_gids:
+ new_glyph_order = [
+ g for g in glyph_order if g in self.glyphs_retained
+ ]
+ else:
+ new_glyph_order = [
+ g for g in glyph_order
+ if font.getGlyphID(g) <= self.last_retained_order
+ ]
+ # We'll call font.setGlyphOrder() at the end of _subset_glyphs when all
+ # tables have been subsetted. Below, we use the new glyph order to get
+ # a map from old to new glyph indices, which can be useful when
+ # subsetting individual tables (e.g. SVG) that refer to GIDs.
+ self.new_glyph_order = new_glyph_order
+ self.glyph_index_map = {
+ order[new_glyph_order[i]]: i
+ for i in range(len(new_glyph_order))
+ }
log.info("Retaining %d glyphs", len(self.glyphs_retained))
@@ -2807,14 +2952,7 @@ class Subsetter(object):
del font[tag]
with timer("subset GlyphOrder"):
- glyphOrder = font.getGlyphOrder()
- if not self.options.retain_gids:
- glyphOrder = [g for g in glyphOrder if g in self.glyphs_retained]
- else:
- glyphOrder = [g for g in glyphOrder if font.getGlyphID(g) <= self.last_retained_order]
-
- font.setGlyphOrder(glyphOrder)
- font._buildReverseGlyphOrderDict()
+ font.setGlyphOrder(self.new_glyph_order)
def _prune_post_subset(self, font):
@@ -2863,13 +3001,11 @@ class Subsetter(object):
@timer("load font")
def load_font(fontFile,
options,
- allowVID=False,
checkChecksums=0,
dontLoadGlyphNames=False,
lazy=True):
font = ttLib.TTFont(fontFile,
- allowVID=allowVID,
checkChecksums=checkChecksums,
recalcBBoxes=options.recalc_bounds,
recalcTimestamp=options.recalc_timestamp,
diff --git a/Lib/fontTools/subset/cff.py b/Lib/fontTools/subset/cff.py
index b59c6b96..0dcb7975 100644
--- a/Lib/fontTools/subset/cff.py
+++ b/Lib/fontTools/subset/cff.py
@@ -2,27 +2,10 @@ from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.misc.roundTools import otRound
+from fontTools.misc.loggingTools import deprecateFunction
from fontTools.varLib.varStore import VarStoreInstancer
+from fontTools.subset.util import _add_method, _uniq_sort
-def _add_method(*clazzes):
- """Returns a decorator function that adds a new method to one or
- more classes."""
- def wrapper(method):
- done = []
- for clazz in clazzes:
- if clazz in done: continue # Support multiple names of a clazz
- done.append(clazz)
- assert clazz.__name__ != 'DefaultTable', \
- 'Oops, table class not found.'
- assert not hasattr(clazz, method.__name__), \
- "Oops, class '%s' has method '%s'." % (clazz.__name__,
- method.__name__)
- setattr(clazz, method.__name__, method)
- return None
- return wrapper
-
-def _uniq_sort(l):
- return sorted(set(l))
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
@@ -70,8 +53,7 @@ def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
if isCFF2 or ignoreWidth:
# CFF2 charstrings have no widths nor 'endchar' operators
- c.decompile()
- c.program = [] if isCFF2 else ['endchar']
+ c.setProgram([] if isCFF2 else ['endchar'])
else:
if hasattr(font, 'FDArray') and font.FDArray is not None:
private = font.FDArray[fdSelectIndex].Private
@@ -137,9 +119,12 @@ def subset_glyphs(self, s):
#sel.format = None
sel.format = 3
sel.gidArray = [sel.gidArray[i] for i in indices]
- cs.charStrings = {g:indices.index(v)
- for g,v in cs.charStrings.items()
- if g in glyphs}
+ newCharStrings = {}
+ for indicesIdx, charsetIdx in enumerate(indices):
+ g = font.charset[charsetIdx]
+ if g in cs.charStrings:
+ newCharStrings[g] = indicesIdx
+ cs.charStrings = newCharStrings
else:
cs.charStrings = {g:v
for g,v in cs.charStrings.items()
@@ -363,86 +348,6 @@ class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
hints.status = max(hints.status, subr_hints.status)
-class StopHintCountEvent(Exception):
- pass
-
-
-
-
-class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
- stop_hintcount_ops = ("op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto",
- "op_vmoveto")
-
- def __init__(self, localSubrs, globalSubrs, private=None):
- psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs,
- private)
-
- def execute(self, charString):
- self.need_hintcount = True # until proven otherwise
- for op_name in self.stop_hintcount_ops:
- setattr(self, op_name, self.stop_hint_count)
-
- if hasattr(charString, '_desubroutinized'):
- # If a charstring has already been desubroutinized, we will still
- # need to execute it if we need to count hints in order to
- # compute the byte length for mask arguments, and haven't finished
- # counting hints pairs.
- if self.need_hintcount and self.callingStack:
- try:
- psCharStrings.SimpleT2Decompiler.execute(self, charString)
- except StopHintCountEvent:
- del self.callingStack[-1]
- return
-
- charString._patches = []
- psCharStrings.SimpleT2Decompiler.execute(self, charString)
- desubroutinized = charString.program[:]
- for idx, expansion in reversed(charString._patches):
- assert idx >= 2
- assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1]
- assert type(desubroutinized[idx - 2]) == int
- if expansion[-1] == 'return':
- expansion = expansion[:-1]
- desubroutinized[idx-2:idx] = expansion
- if not self.private.in_cff2:
- if 'endchar' in desubroutinized:
- # Cut off after first endchar
- desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1]
- else:
- if not len(desubroutinized) or desubroutinized[-1] != 'return':
- desubroutinized.append('return')
-
- charString._desubroutinized = desubroutinized
- del charString._patches
-
- def op_callsubr(self, index):
- subr = self.localSubrs[self.operandStack[-1]+self.localBias]
- psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
- self.processSubr(index, subr)
-
- def op_callgsubr(self, index):
- subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
- psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
- self.processSubr(index, subr)
-
- def stop_hint_count(self, *args):
- self.need_hintcount = False
- for op_name in self.stop_hintcount_ops:
- setattr(self, op_name, None)
- cs = self.callingStack[-1]
- if hasattr(cs, '_desubroutinized'):
- raise StopHintCountEvent()
-
- def op_hintmask(self, index):
- psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
- if self.need_hintcount:
- self.stop_hint_count()
-
- def processSubr(self, index, subr):
- cs = self.callingStack[-1]
- if not hasattr(cs, '_desubroutinized'):
- cs._patches.append((index, subr._desubroutinized))
-
@_add_method(ttLib.getTableClass('CFF '))
def prune_post_subset(self, ttfFont, options):
@@ -462,7 +367,7 @@ def prune_post_subset(self, ttfFont, options):
# Desubroutinize if asked for
if options.desubroutinize:
- self.desubroutinize()
+ cff.desubroutinize()
# Drop hints if not needed
if not options.hinting:
@@ -478,36 +383,11 @@ def _delete_empty_subrs(private_dict):
del private_dict.rawDict['Subrs']
del private_dict.Subrs
+
+@deprecateFunction("use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning)
@_add_method(ttLib.getTableClass('CFF '))
def desubroutinize(self):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- c.decompile()
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
- decompiler.execute(c)
- c.program = c._desubroutinized
- del c._desubroutinized
- # Delete all the local subrs
- if hasattr(font, 'FDArray'):
- for fd in font.FDArray:
- pd = fd.Private
- if hasattr(pd, 'Subrs'):
- del pd.Subrs
- if 'Subrs' in pd.rawDict:
- del pd.rawDict['Subrs']
- else:
- pd = font.Private
- if hasattr(pd, 'Subrs'):
- del pd.Subrs
- if 'Subrs' in pd.rawDict:
- del pd.rawDict['Subrs']
- # as well as the global subrs
- cff.GlobalSubrs.clear()
+ self.cff.desubroutinize()
@_add_method(ttLib.getTableClass('CFF '))
diff --git a/Lib/fontTools/subset/svg.py b/Lib/fontTools/subset/svg.py
new file mode 100644
index 00000000..e25fb3e6
--- /dev/null
+++ b/Lib/fontTools/subset/svg.py
@@ -0,0 +1,248 @@
+from __future__ import annotations
+
+import re
+from functools import lru_cache
+from itertools import chain, count
+from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple
+
+try:
+ from lxml import etree
+except ModuleNotFoundError:
+ # lxml is required for subsetting SVG, but we prefer to delay the import error
+ # until subset_glyphs() is called (i.e. if font to subset has an 'SVG ' table)
+ etree = None
+
+from fontTools import ttLib
+from fontTools.subset.util import _add_method
+
+
+__all__ = ["subset_glyphs"]
+
+
+GID_RE = re.compile(r"^glyph(\d+)$")
+
+NAMESPACES = {
+ "svg": "http://www.w3.org/2000/svg",
+ "xlink": "http://www.w3.org/1999/xlink",
+}
+XLINK_HREF = f'{{{NAMESPACES["xlink"]}}}href'
+
+
+# TODO(anthrotype): Replace with functools.cache once we are 3.9+
+@lru_cache(maxsize=None)
+def xpath(path):
+ # compile XPath upfront, caching result to reuse on multiple elements
+ return etree.XPath(path, namespaces=NAMESPACES)
+
+
+def group_elements_by_id(tree: etree.Element) -> Dict[str, etree.Element]:
+ # select all svg elements with 'id' attribute no matter where they are
+ # including the root element itself:
+ # https://github.com/fonttools/fonttools/issues/2548
+ return {el.attrib["id"]: el for el in xpath("//svg:*[@id]")(tree)}
+
+
+def parse_css_declarations(style_attr: str) -> Dict[str, str]:
+ # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/style
+ # https://developer.mozilla.org/en-US/docs/Web/CSS/Syntax#css_declarations
+ result = {}
+ for declaration in style_attr.split(";"):
+ if declaration.count(":") == 1:
+ property_name, value = declaration.split(":")
+ property_name = property_name.strip()
+ result[property_name] = value.strip()
+ elif declaration.strip():
+ raise ValueError(f"Invalid CSS declaration syntax: {declaration}")
+ return result
+
+
+def iter_referenced_ids(tree: etree.Element) -> Iterator[str]:
+ # Yield all the ids that can be reached via references from this element tree.
+ # We currently support xlink:href (as used by <use> and gradient templates),
+ # and local url(#...) links found in fill or clip-path attributes
+ # TODO(anthrotype): Check we aren't missing other supported kinds of reference
+ find_svg_elements_with_references = xpath(
+ ".//svg:*[ "
+ "starts-with(@xlink:href, '#') "
+ "or starts-with(@fill, 'url(#') "
+ "or starts-with(@clip-path, 'url(#') "
+ "or contains(@style, ':url(#') "
+ "]",
+ )
+ for el in chain([tree], find_svg_elements_with_references(tree)):
+ ref_id = href_local_target(el)
+ if ref_id is not None:
+ yield ref_id
+
+ attrs = el.attrib
+ if "style" in attrs:
+ attrs = {**attrs, **parse_css_declarations(el.attrib["style"])}
+ for attr in ("fill", "clip-path"):
+ if attr in attrs:
+ value = attrs[attr]
+ if value.startswith("url(#") and value.endswith(")"):
+ ref_id = value[5:-1]
+ assert ref_id
+ yield ref_id
+
+
+def closure_element_ids(
+ elements: Dict[str, etree.Element], element_ids: Set[str]
+) -> None:
+ # Expand the initial subset of element ids to include ids that can be reached
+ # via references from the initial set.
+ unvisited = element_ids
+ while unvisited:
+ referenced: Set[str] = set()
+ for el_id in unvisited:
+ if el_id not in elements:
+ # ignore dangling reference; not our job to validate svg
+ continue
+ referenced.update(iter_referenced_ids(elements[el_id]))
+ referenced -= element_ids
+ element_ids.update(referenced)
+ unvisited = referenced
+
+
+def subset_elements(el: etree.Element, retained_ids: Set[str]) -> bool:
+ # Keep elements if their id is in the subset, or any of their children's id is.
+ # Drop elements whose id is not in the subset, and either have no children,
+ # or all their children are being dropped.
+ if el.attrib.get("id") in retained_ids:
+ # if id is in the set, don't recurse; keep whole subtree
+ return True
+ # recursively subset all the children; we use a list comprehension instead
+ # of a parentheses-less generator expression because we don't want any() to
+ # short-circuit, as our function has a side effect of dropping empty elements.
+ if any([subset_elements(e, retained_ids) for e in el]):
+ return True
+ assert len(el) == 0
+ parent = el.getparent()
+ if parent is not None:
+ parent.remove(el)
+ return False
+
+
+def remap_glyph_ids(
+ svg: etree.Element, glyph_index_map: Dict[int, int]
+) -> Dict[str, str]:
+ # Given {old_gid: new_gid} map, rename all elements containing id="glyph{gid}"
+ # special attributes
+ elements = group_elements_by_id(svg)
+ id_map = {}
+ for el_id, el in elements.items():
+ m = GID_RE.match(el_id)
+ if not m:
+ continue
+ old_index = int(m.group(1))
+ new_index = glyph_index_map.get(old_index)
+ if new_index is not None:
+ if old_index == new_index:
+ continue
+ new_id = f"glyph{new_index}"
+ else:
+ # If the old index is missing, the element corresponds to a glyph that was
+ # excluded from the font's subset.
+ # We rename it to avoid clashes with the new GIDs or other element ids.
+ new_id = f".{el_id}"
+ n = count(1)
+ while new_id in elements:
+ new_id = f"{new_id}.{next(n)}"
+
+ id_map[el_id] = new_id
+ el.attrib["id"] = new_id
+
+ return id_map
+
+
+def href_local_target(el: etree.Element) -> Optional[str]:
+ if XLINK_HREF in el.attrib:
+ href = el.attrib[XLINK_HREF]
+ if href.startswith("#") and len(href) > 1:
+ return href[1:] # drop the leading #
+ return None
+
+
+def update_glyph_href_links(svg: etree.Element, id_map: Dict[str, str]) -> None:
+ # update all xlink:href="#glyph..." attributes to point to the new glyph ids
+ for el in xpath(".//svg:*[starts-with(@xlink:href, '#glyph')]")(svg):
+ old_id = href_local_target(el)
+ assert old_id is not None
+ if old_id in id_map:
+ new_id = id_map[old_id]
+ el.attrib[XLINK_HREF] = f"#{new_id}"
+
+
+def ranges(ints: Iterable[int]) -> Iterator[Tuple[int, int]]:
+ # Yield sorted, non-overlapping (min, max) ranges of consecutive integers
+ sorted_ints = iter(sorted(set(ints)))
+ try:
+ start = end = next(sorted_ints)
+ except StopIteration:
+ return
+ for v in sorted_ints:
+ if v - 1 == end:
+ end = v
+ else:
+ yield (start, end)
+ start = end = v
+ yield (start, end)
+
+
+@_add_method(ttLib.getTableClass("SVG "))
+def subset_glyphs(self, s) -> bool:
+ if etree is None:
+ raise ModuleNotFoundError("No module named 'lxml', required to subset SVG")
+
+ # glyph names (before subsetting)
+ glyph_order: List[str] = s.orig_glyph_order
+ # map from glyph names to original glyph indices
+ rev_orig_glyph_map: Dict[str, int] = s.reverseOrigGlyphMap
+ # map from original to new glyph indices (after subsetting)
+ glyph_index_map: Dict[int, int] = s.glyph_index_map
+
+ new_docs: List[Tuple[bytes, int, int]] = []
+ for doc, start, end in self.docList:
+
+ glyphs = {glyph_order[i] for i in range(start, end + 1)}.intersection(s.glyphs)
+ if not glyphs:
+ # no intersection: we can drop the whole record
+ continue
+
+ svg = etree.fromstring(
+ # encode because fromstring dislikes xml encoding decl if input is str.
+ # SVG xml encoding must be utf-8 as per OT spec.
+ doc.encode("utf-8"),
+ parser=etree.XMLParser(
+ # Disable libxml2 security restrictions to support very deep trees.
+ # Without this we would get an error like this:
+ # `lxml.etree.XMLSyntaxError: internal error: Huge input lookup`
+ # when parsing big fonts e.g. noto-emoji-picosvg.ttf.
+ huge_tree=True,
+ # ignore blank text as it's not meaningful in OT-SVG; it also prevents
+ # dangling tail text after removing an element when pretty_print=True
+ remove_blank_text=True,
+ ),
+ )
+
+ elements = group_elements_by_id(svg)
+ gids = {rev_orig_glyph_map[g] for g in glyphs}
+ element_ids = {f"glyph{i}" for i in gids}
+ closure_element_ids(elements, element_ids)
+
+ if not subset_elements(svg, element_ids):
+ continue
+
+ if not s.options.retain_gids:
+ id_map = remap_glyph_ids(svg, glyph_index_map)
+ update_glyph_href_links(svg, id_map)
+
+ new_doc = etree.tostring(svg, pretty_print=s.options.pretty_svg).decode("utf-8")
+
+ new_gids = (glyph_index_map[i] for i in gids)
+ for start, end in ranges(new_gids):
+ new_docs.append((new_doc, start, end))
+
+ self.docList = new_docs
+
+ return bool(self.docList)
diff --git a/Lib/fontTools/subset/util.py b/Lib/fontTools/subset/util.py
new file mode 100644
index 00000000..d20e925d
--- /dev/null
+++ b/Lib/fontTools/subset/util.py
@@ -0,0 +1,25 @@
+"""Private utility methods used by the subset modules"""
+
+
+def _add_method(*clazzes):
+ """Returns a decorator function that adds a new method to one or
+ more classes."""
+
+ def wrapper(method):
+ done = []
+ for clazz in clazzes:
+ if clazz in done:
+ continue # Support multiple names of a clazz
+ done.append(clazz)
+ assert clazz.__name__ != "DefaultTable", "Oops, table class not found."
+ assert not hasattr(
+ clazz, method.__name__
+ ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
+ setattr(clazz, method.__name__, method)
+ return None
+
+ return wrapper
+
+
+def _uniq_sort(l):
+ return sorted(set(l))
diff --git a/Lib/fontTools/svgLib/path/__init__.py b/Lib/fontTools/svgLib/path/__init__.py
index 9440429b..fbddeeab 100644
--- a/Lib/fontTools/svgLib/path/__init__.py
+++ b/Lib/fontTools/svgLib/path/__init__.py
@@ -1,7 +1,6 @@
-from fontTools.misc.py23 import tostr
-
from fontTools.pens.transformPen import TransformPen
from fontTools.misc import etree
+from fontTools.misc.textTools import tostr
from .parser import parse_path
from .shapes import PathBuilder
diff --git a/Lib/fontTools/t1Lib/__init__.py b/Lib/fontTools/t1Lib/__init__.py
index e1d94d35..a74f9a47 100644
--- a/Lib/fontTools/t1Lib/__init__.py
+++ b/Lib/fontTools/t1Lib/__init__.py
@@ -15,14 +15,17 @@ write(path, data, kind='OTHER', dohex=False)
part should be written as hexadecimal or binary, but only if kind
is 'OTHER'.
"""
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin
+import fontTools
from fontTools.misc import eexec
from fontTools.misc.macCreatorType import getMacCreatorAndType
+from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes
+from fontTools.misc.psOperators import _type1_pre_eexec_order, _type1_fontinfo_order, _type1_post_eexec_order
+from fontTools.encodings.StandardEncoding import StandardEncoding
import os
import re
__author__ = "jvr"
-__version__ = "1.0b2"
+__version__ = "1.0b3"
DEBUG = 0
@@ -65,8 +68,8 @@ class T1Font(object):
write(path, self.getData(), type, dohex)
def getData(self):
- # XXX Todo: if the data has been converted to Python object,
- # recreate the PS stream
+ if not hasattr(self, "data"):
+ self.data = self.createData()
return self.data
def getGlyphSet(self):
@@ -102,6 +105,148 @@ class T1Font(object):
subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
del self.data
+ def createData(self):
+ sf = self.font
+
+ eexec_began = False
+ eexec_dict = {}
+ lines = []
+ lines.extend([self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
+ self._tobytes(f"%t1Font: ({fontTools.version})"),
+ self._tobytes(f"%%BeginResource: font {sf['FontName']}")])
+ # follow t1write.c:writeRegNameKeyedFont
+ size = 3 # Headroom for new key addition
+ size += 1 # FontMatrix is always counted
+ size += 1 + 1 # Private, CharStrings
+ for key in font_dictionary_keys:
+ size += int(key in sf)
+ lines.append(self._tobytes(f"{size} dict dup begin"))
+
+ for key, value in sf.items():
+ if eexec_began:
+ eexec_dict[key] = value
+ continue
+
+ if key == "FontInfo":
+ fi = sf["FontInfo"]
+ # follow t1write.c:writeFontInfoDict
+ size = 3 # Headroom for new key addition
+ for subkey in FontInfo_dictionary_keys:
+ size += int(subkey in fi)
+ lines.append(self._tobytes(f"/FontInfo {size} dict dup begin"))
+
+ for subkey, subvalue in fi.items():
+ lines.extend(self._make_lines(subkey, subvalue))
+ lines.append(b"end def")
+ elif key in _type1_post_eexec_order: # usually 'Private'
+ eexec_dict[key] = value
+ eexec_began = True
+ else:
+ lines.extend(self._make_lines(key, value))
+ lines.append(b"end")
+ eexec_portion = self.encode_eexec(eexec_dict)
+ lines.append(bytesjoin([b"currentfile eexec ", eexec_portion]))
+
+ for _ in range(8):
+ lines.append(self._tobytes("0"*64))
+ lines.extend([b"cleartomark",
+ b"%%EndResource",
+ b"%%EOF"])
+
+ data = bytesjoin(lines, "\n")
+ return data
+
+ def encode_eexec(self, eexec_dict):
+ lines = []
+
+ # '-|', '|-', '|'
+ RD_key, ND_key, NP_key = None, None, None
+
+ for key, value in eexec_dict.items():
+ if key == "Private":
+ pr = eexec_dict["Private"]
+ # follow t1write.c:writePrivateDict
+ size = 3 # for RD, ND, NP
+ for subkey in Private_dictionary_keys:
+ size += int(subkey in pr)
+ lines.append(b"dup /Private")
+ lines.append(self._tobytes(f"{size} dict dup begin"))
+ for subkey, subvalue in pr.items():
+ if not RD_key and subvalue == RD_value:
+ RD_key = subkey
+ elif not ND_key and subvalue == ND_value:
+ ND_key = subkey
+ elif not NP_key and subvalue == PD_value:
+ NP_key = subkey
+
+ if subkey == 'OtherSubrs':
+ # XXX: assert that no flex hint is used
+ lines.append(self._tobytes(hintothers))
+ elif subkey == "Subrs":
+ # XXX: standard Subrs only
+ lines.append(b"/Subrs 5 array")
+ for i, subr_bin in enumerate(std_subrs):
+ encrypted_subr, R = eexec.encrypt(bytesjoin([char_IV, subr_bin]), 4330)
+ lines.append(bytesjoin([self._tobytes(f"dup {i} {len(encrypted_subr)} {RD_key} "), encrypted_subr, self._tobytes(f" {NP_key}")]))
+ lines.append(b'def')
+
+ lines.append(b"put")
+ else:
+ lines.extend(self._make_lines(subkey, subvalue))
+ elif key == "CharStrings":
+ lines.append(b"dup /CharStrings")
+ lines.append(self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin"))
+ for glyph_name, char_bin in eexec_dict["CharStrings"].items():
+ char_bin.compile()
+ encrypted_char, R = eexec.encrypt(bytesjoin([char_IV, char_bin.bytecode]), 4330)
+ lines.append(bytesjoin([self._tobytes(f"/{glyph_name} {len(encrypted_char)} {RD_key} "), encrypted_char, self._tobytes(f" {ND_key}")]))
+ lines.append(b"end put")
+ else:
+ lines.extend(self._make_lines(key, value))
+
+ lines.extend([b"end",
+ b"dup /FontName get exch definefont pop",
+ b"mark",
+ b"currentfile closefile\n"])
+
+ eexec_portion = bytesjoin(lines, "\n")
+ encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
+
+ return encrypted_eexec
+
+ def _make_lines(self, key, value):
+ if key == "FontName":
+ return [self._tobytes(f"/{key} /{value} def")]
+ if key in ["isFixedPitch", "ForceBold", "RndStemUp"]:
+ return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
+ elif key == "Encoding":
+ if value == StandardEncoding:
+ return [self._tobytes(f"/{key} StandardEncoding def")]
+ else:
+ # follow fontTools.misc.psOperators._type1_Encoding_repr
+ lines = []
+ lines.append(b"/Encoding 256 array")
+ lines.append(b"0 1 255 {1 index exch /.notdef put} for")
+ for i in range(256):
+ name = value[i]
+ if name != ".notdef":
+ lines.append(self._tobytes(f"dup {i} /{name} put"))
+ lines.append(b"def")
+ return lines
+ if isinstance(value, str):
+ return [self._tobytes(f"/{key} ({value}) def")]
+ elif isinstance(value, bool):
+ return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
+ elif isinstance(value, list):
+ return [self._tobytes(f"/{key} [{' '.join(str(v) for v in value)}] def")]
+ elif isinstance(value, tuple):
+ return [self._tobytes(f"/{key} {{{' '.join(str(v) for v in value)}}} def")]
+ else:
+ return [self._tobytes(f"/{key} {value} def")]
+
+ def _tobytes(self, s, errors="strict"):
+ return tobytes(s, self.encoding, errors)
+
# low level T1 data read and write functions
@@ -367,3 +512,69 @@ def stringToLong(s):
for i in range(4):
l += byteord(s[i]) << (i * 8)
return l
+
+
+# PS stream helpers
+
+font_dictionary_keys = list(_type1_pre_eexec_order)
+# t1write.c:writeRegNameKeyedFont
+# always counts following keys
+font_dictionary_keys.remove("FontMatrix")
+
+FontInfo_dictionary_keys = list(_type1_fontinfo_order)
+# extend because AFDKO tx may use following keys
+FontInfo_dictionary_keys.extend([
+ "FSType",
+ "Copyright",
+])
+
+Private_dictionary_keys = [
+ # We don't know what names will be actually used.
+ # "RD",
+ # "ND",
+ # "NP",
+ "Subrs",
+ "OtherSubrs",
+ "UniqueID",
+ "BlueValues",
+ "OtherBlues",
+ "FamilyBlues",
+ "FamilyOtherBlues",
+ "BlueScale",
+ "BlueShift",
+ "BlueFuzz",
+ "StdHW",
+ "StdVW",
+ "StemSnapH",
+ "StemSnapV",
+ "ForceBold",
+ "LanguageGroup",
+ "password",
+ "lenIV",
+ "MinFeature",
+ "RndStemUp",
+]
+
+# t1write_hintothers.h
+hintothers = """/OtherSubrs[{}{}{}{systemdict/internaldict known not{pop 3}{1183615869
+systemdict/internaldict get exec dup/startlock known{/startlock get exec}{dup
+/strtlck known{/strtlck get exec}{pop 3}ifelse}ifelse}ifelse}executeonly]def"""
+# t1write.c:saveStdSubrs
+std_subrs = [
+ # 3 0 callother pop pop setcurrentpoint return
+ b"\x8e\x8b\x0c\x10\x0c\x11\x0c\x11\x0c\x21\x0b",
+ # 0 1 callother return
+ b"\x8b\x8c\x0c\x10\x0b",
+ # 0 2 callother return
+ b"\x8b\x8d\x0c\x10\x0b",
+ # return
+ b"\x0b",
+ # 3 1 3 callother pop callsubr return
+ b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b"
+]
+# follow t1write.c:writeRegNameKeyedFont
+eexec_IV = b"cccc"
+char_IV = b"\x0c\x0c\x0c\x0c"
+RD_value = ("string", "currentfile", "exch", "readstring", "pop")
+ND_value = ("def",)
+PD_value = ("put",)
diff --git a/Lib/fontTools/tfmLib.py b/Lib/fontTools/tfmLib.py
new file mode 100644
index 00000000..673373ff
--- /dev/null
+++ b/Lib/fontTools/tfmLib.py
@@ -0,0 +1,460 @@
+"""Module for reading TFM (TeX Font Metrics) files.
+
+The TFM format is described in the TFtoPL WEB source code, whose typeset form
+can be found on `CTAN <http://mirrors.ctan.org/info/knuth-pdf/texware/tftopl.pdf>`_.
+
+ >>> from fontTools.tfmLib import TFM
+ >>> tfm = TFM("Tests/tfmLib/data/cmr10.tfm")
+ >>>
+ >>> # Accessing an attribute gets you metadata.
+ >>> tfm.checksum
+ 1274110073
+ >>> tfm.designsize
+ 10.0
+ >>> tfm.codingscheme
+ 'TeX text'
+ >>> tfm.family
+ 'CMR'
+ >>> tfm.seven_bit_safe_flag
+ False
+ >>> tfm.face
+ 234
+ >>> tfm.extraheader
+ {}
+ >>> tfm.fontdimens
+ {'SLANT': 0.0, 'SPACE': 0.33333396911621094, 'STRETCH': 0.16666698455810547, 'SHRINK': 0.11111164093017578, 'XHEIGHT': 0.4305553436279297, 'QUAD': 1.0000028610229492, 'EXTRASPACE': 0.11111164093017578}
+ >>> # Accessing a character gets you its metrics.
+ >>> # “width” is always available, other metrics are available only when
+ >>> # applicable. All values are relative to “designsize”.
+ >>> tfm.chars[ord("g")]
+ {'width': 0.5000019073486328, 'height': 0.4305553436279297, 'depth': 0.1944446563720703, 'italic': 0.013888359069824219}
+ >>> # Kerning and ligature can be accessed as well.
+ >>> tfm.kerning[ord("c")]
+ {104: -0.02777862548828125, 107: -0.02777862548828125}
+ >>> tfm.ligatures[ord("f")]
+ {105: ('LIG', 12), 102: ('LIG', 11), 108: ('LIG', 13)}
+"""
+
+from types import SimpleNamespace
+
+from fontTools.misc.sstruct import calcsize, unpack, unpack2
+
+SIZES_FORMAT = """
+ >
+ lf: h # length of the entire file, in words
+ lh: h # length of the header data, in words
+ bc: h # smallest character code in the font
+ ec: h # largest character code in the font
+ nw: h # number of words in the width table
+ nh: h # number of words in the height table
+ nd: h # number of words in the depth table
+ ni: h # number of words in the italic correction table
+ nl: h # number of words in the ligature/kern table
+ nk: h # number of words in the kern table
+ ne: h # number of words in the extensible character table
+ np: h # number of font parameter words
+"""
+
+SIZES_SIZE = calcsize(SIZES_FORMAT)
+
+FIXED_FORMAT = "12.20F"
+
+HEADER_FORMAT1 = f"""
+ >
+ checksum: L
+ designsize: {FIXED_FORMAT}
+"""
+
+HEADER_FORMAT2 = f"""
+ {HEADER_FORMAT1}
+ codingscheme: 40p
+"""
+
+HEADER_FORMAT3 = f"""
+ {HEADER_FORMAT2}
+ family: 20p
+"""
+
+HEADER_FORMAT4 = f"""
+ {HEADER_FORMAT3}
+ seven_bit_safe_flag: ?
+ ignored: x
+ ignored: x
+ face: B
+"""
+
+HEADER_SIZE1 = calcsize(HEADER_FORMAT1)
+HEADER_SIZE2 = calcsize(HEADER_FORMAT2)
+HEADER_SIZE3 = calcsize(HEADER_FORMAT3)
+HEADER_SIZE4 = calcsize(HEADER_FORMAT4)
+
+LIG_KERN_COMMAND = """
+ >
+ skip_byte: B
+ next_char: B
+ op_byte: B
+ remainder: B
+"""
+
+BASE_PARAMS = [
+ "SLANT",
+ "SPACE",
+ "STRETCH",
+ "SHRINK",
+ "XHEIGHT",
+ "QUAD",
+ "EXTRASPACE",
+]
+
+MATHSY_PARAMS = [
+ "NUM1",
+ "NUM2",
+ "NUM3",
+ "DENOM1",
+ "DENOM2",
+ "SUP1",
+ "SUP2",
+ "SUP3",
+ "SUB1",
+ "SUB2",
+ "SUPDROP",
+ "SUBDROP",
+ "DELIM1",
+ "DELIM2",
+ "AXISHEIGHT",
+]
+
+MATHEX_PARAMS = [
+ "DEFAULTRULETHICKNESS",
+ "BIGOPSPACING1",
+ "BIGOPSPACING2",
+ "BIGOPSPACING3",
+ "BIGOPSPACING4",
+ "BIGOPSPACING5",
+]
+
+VANILLA = 0
+MATHSY = 1
+MATHEX = 2
+
+UNREACHABLE = 0
+PASSTHROUGH = 1
+ACCESSABLE = 2
+
+NO_TAG = 0
+LIG_TAG = 1
+LIST_TAG = 2
+EXT_TAG = 3
+
+STOP_FLAG = 128
+KERN_FLAG = 128
+
+
+class TFMException(Exception):
+ def __init__(self, message):
+ super().__init__(message)
+
+
+class TFM:
+ def __init__(self, file):
+ self._read(file)
+
+ def __repr__(self):
+ return (
+ f"<TFM"
+ f" for {self.family}"
+ f" in {self.codingscheme}"
+ f" at {self.designsize:g}pt>"
+ )
+
+ def _read(self, file):
+ if hasattr(file, "read"):
+ data = file.read()
+ else:
+ with open(file, "rb") as fp:
+ data = fp.read()
+
+ self._data = data
+
+ if len(data) < SIZES_SIZE:
+ raise TFMException("Too short input file")
+
+ sizes = SimpleNamespace()
+ unpack2(SIZES_FORMAT, data, sizes)
+
+ # Do some file structure sanity checks.
+ # TeX and TFtoPL do additional functional checks and might even correct
+ # “errors” in the input file, but we instead try to output the file as
+ # it is as long as it is parsable, even if the data make no sense.
+
+ if sizes.lf < 0:
+ raise TFMException("The file claims to have negative or zero length!")
+
+ if len(data) < sizes.lf * 4:
+ raise TFMException("The file has fewer bytes than it claims!")
+
+ for name, length in vars(sizes).items():
+ if length < 0:
+ raise TFMException("The subfile size: '{name}' is negative!")
+
+ if sizes.lh < 2:
+ raise TFMException(f"The header length is only {sizes.lh}!")
+
+ if sizes.bc > sizes.ec + 1 or sizes.ec > 255:
+ raise TFMException(
+ f"The character code range {sizes.bc}..{sizes.ec} is illegal!"
+ )
+
+ if sizes.nw == 0 or sizes.nh == 0 or sizes.nd == 0 or sizes.ni == 0:
+ raise TFMException("Incomplete subfiles for character dimensions!")
+
+ if sizes.ne > 256:
+ raise TFMException(f"There are {ne} extensible recipes!")
+
+ if sizes.lf != (
+ 6
+ + sizes.lh
+ + (sizes.ec - sizes.bc + 1)
+ + sizes.nw
+ + sizes.nh
+ + sizes.nd
+ + sizes.ni
+ + sizes.nl
+ + sizes.nk
+ + sizes.ne
+ + sizes.np
+ ):
+ raise TFMException("Subfile sizes don’t add up to the stated total")
+
+ # Subfile offsets, used in the helper function below. These all are
+ # 32-bit word offsets not 8-bit byte offsets.
+ char_base = 6 + sizes.lh - sizes.bc
+ width_base = char_base + sizes.ec + 1
+ height_base = width_base + sizes.nw
+ depth_base = height_base + sizes.nh
+ italic_base = depth_base + sizes.nd
+ lig_kern_base = italic_base + sizes.ni
+ kern_base = lig_kern_base + sizes.nl
+ exten_base = kern_base + sizes.nk
+ param_base = exten_base + sizes.ne
+
+ # Helper functions for accessing individual data. If this looks
+ # nonidiomatic Python, I blame the effect of reading the literate WEB
+ # documentation of TFtoPL.
+ def char_info(c):
+ return 4 * (char_base + c)
+
+ def width_index(c):
+ return data[char_info(c)]
+
+ def noneexistent(c):
+ return c < sizes.bc or c > sizes.ec or width_index(c) == 0
+
+ def height_index(c):
+ return data[char_info(c) + 1] // 16
+
+ def depth_index(c):
+ return data[char_info(c) + 1] % 16
+
+ def italic_index(c):
+ return data[char_info(c) + 2] // 4
+
+ def tag(c):
+ return data[char_info(c) + 2] % 4
+
+ def remainder(c):
+ return data[char_info(c) + 3]
+
+ def width(c):
+ r = 4 * (width_base + width_index(c))
+ return read_fixed(r, "v")["v"]
+
+ def height(c):
+ r = 4 * (height_base + height_index(c))
+ return read_fixed(r, "v")["v"]
+
+ def depth(c):
+ r = 4 * (depth_base + depth_index(c))
+ return read_fixed(r, "v")["v"]
+
+ def italic(c):
+ r = 4 * (italic_base + italic_index(c))
+ return read_fixed(r, "v")["v"]
+
+ def exten(c):
+ return 4 * (exten_base + remainder(c))
+
+ def lig_step(i):
+ return 4 * (lig_kern_base + i)
+
+ def lig_kern_command(i):
+ command = SimpleNamespace()
+ unpack2(LIG_KERN_COMMAND, data[i:], command)
+ return command
+
+ def kern(i):
+ r = 4 * (kern_base + i)
+ return read_fixed(r, "v")["v"]
+
+ def param(i):
+ return 4 * (param_base + i)
+
+ def read_fixed(index, key, obj=None):
+ ret = unpack2(f">;{key}:{FIXED_FORMAT}", data[index:], obj)
+ return ret[0]
+
+ # Set all attributes to empty values regardless of the header size.
+ unpack(HEADER_FORMAT4, [0] * HEADER_SIZE4, self)
+
+ offset = 24
+ length = sizes.lh * 4
+ self.extraheader = {}
+ if length >= HEADER_SIZE4:
+ rest = unpack2(HEADER_FORMAT4, data[offset:], self)[1]
+ if self.face < 18:
+ s = self.face % 2
+ b = self.face // 2
+ self.face = "MBL"[b % 3] + "RI"[s] + "RCE"[b // 3]
+ for i in range(sizes.lh - HEADER_SIZE4 // 4):
+ rest = unpack2(f">;HEADER{i + 18}:l", rest, self.extraheader)[1]
+ elif length >= HEADER_SIZE3:
+ unpack2(HEADER_FORMAT3, data[offset:], self)
+ elif length >= HEADER_SIZE2:
+ unpack2(HEADER_FORMAT2, data[offset:], self)
+ elif length >= HEADER_SIZE1:
+ unpack2(HEADER_FORMAT1, data[offset:], self)
+
+ self.fonttype = VANILLA
+ scheme = self.codingscheme.upper()
+ if scheme.startswith("TEX MATH SY"):
+ self.fonttype = MATHSY
+ elif scheme.startswith("TEX MATH EX"):
+ self.fonttype = MATHEX
+
+ self.fontdimens = {}
+ for i in range(sizes.np):
+ name = f"PARAMETER{i+1}"
+ if i <= 6:
+ name = BASE_PARAMS[i]
+ elif self.fonttype == MATHSY and i <= 21:
+ name = MATHSY_PARAMS[i - 7]
+ elif self.fonttype == MATHEX and i <= 12:
+ name = MATHEX_PARAMS[i - 7]
+ read_fixed(param(i), name, self.fontdimens)
+
+ lig_kern_map = {}
+ self.right_boundary_char = None
+ self.left_boundary_char = None
+ if sizes.nl > 0:
+ cmd = lig_kern_command(lig_step(0))
+ if cmd.skip_byte == 255:
+ self.right_boundary_char = cmd.next_char
+
+ cmd = lig_kern_command(lig_step((sizes.nl - 1)))
+ if cmd.skip_byte == 255:
+ self.left_boundary_char = 256
+ r = 256 * cmd.op_byte + cmd.remainder
+ lig_kern_map[self.left_boundary_char] = r
+
+ self.chars = {}
+ for c in range(sizes.bc, sizes.ec + 1):
+ if width_index(c) > 0:
+ self.chars[c] = info = {}
+ info["width"] = width(c)
+ if height_index(c) > 0:
+ info["height"] = height(c)
+ if depth_index(c) > 0:
+ info["depth"] = depth(c)
+ if italic_index(c) > 0:
+ info["italic"] = italic(c)
+ char_tag = tag(c)
+ if char_tag == NO_TAG:
+ pass
+ elif char_tag == LIG_TAG:
+ lig_kern_map[c] = remainder(c)
+ elif char_tag == LIST_TAG:
+ info["nextlarger"] = remainder(c)
+ elif char_tag == EXT_TAG:
+ info["varchar"] = varchar = {}
+ for i in range(4):
+ part = data[exten(c) + i]
+ if i == 3 or part > 0:
+ name = "rep"
+ if i == 0:
+ name = "top"
+ elif i == 1:
+ name = "mid"
+ elif i == 2:
+ name = "bot"
+ if noneexistent(part):
+ varchar[name] = c
+ else:
+ varchar[name] = part
+
+ self.ligatures = {}
+ self.kerning = {}
+ for c, i in sorted(lig_kern_map.items()):
+ cmd = lig_kern_command(lig_step(i))
+ if cmd.skip_byte > STOP_FLAG:
+ i = 256 * cmd.op_byte + cmd.remainder
+
+ while i < sizes.nl:
+ cmd = lig_kern_command(lig_step(i))
+ if cmd.skip_byte > STOP_FLAG:
+ pass
+ else:
+ if cmd.op_byte >= KERN_FLAG:
+ r = 256 * (cmd.op_byte - KERN_FLAG) + cmd.remainder
+ self.kerning.setdefault(c, {})[cmd.next_char] = kern(r)
+ else:
+ r = cmd.op_byte
+ if r == 4 or (r > 7 and r != 11):
+ # Ligature step with nonstandard code, we output
+ # the code verbatim.
+ lig = r
+ else:
+ lig = ""
+ if r % 4 > 1:
+ lig += "/"
+ lig += "LIG"
+ if r % 2 != 0:
+ lig += "/"
+ while r > 3:
+ lig += ">"
+ r -= 4
+ self.ligatures.setdefault(c, {})[cmd.next_char] = (
+ lig,
+ cmd.remainder,
+ )
+
+ if cmd.skip_byte >= STOP_FLAG:
+ break
+ i += cmd.skip_byte + 1
+
+
+if __name__ == "__main__":
+ import sys
+
+ tfm = TFM(sys.argv[1])
+ print(
+ "\n".join(
+ x
+ for x in [
+ f"tfm.checksum={tfm.checksum}",
+ f"tfm.designsize={tfm.designsize}",
+ f"tfm.codingscheme={tfm.codingscheme}",
+ f"tfm.fonttype={tfm.fonttype}",
+ f"tfm.family={tfm.family}",
+ f"tfm.seven_bit_safe_flag={tfm.seven_bit_safe_flag}",
+ f"tfm.face={tfm.face}",
+ f"tfm.extraheader={tfm.extraheader}",
+ f"tfm.fontdimens={tfm.fontdimens}",
+ f"tfm.right_boundary_char={tfm.right_boundary_char}",
+ f"tfm.left_boundary_char={tfm.left_boundary_char}",
+ f"tfm.kerning={tfm.kerning}",
+ f"tfm.ligatures={tfm.ligatures}",
+ f"tfm.chars={tfm.chars}",
+ ]
+ )
+ )
+ print(tfm)
diff --git a/Lib/fontTools/ttLib/__init__.py b/Lib/fontTools/ttLib/__init__.py
index 16417e73..dadd7f20 100644
--- a/Lib/fontTools/ttLib/__init__.py
+++ b/Lib/fontTools/ttLib/__init__.py
@@ -1,45 +1,4 @@
-"""fontTools.ttLib -- a package for dealing with TrueType fonts.
-
-This package offers translators to convert TrueType fonts to Python
-objects and vice versa, and additionally from Python to TTX (an XML-based
-text format) and vice versa.
-
-Example interactive session:
-
-Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL]
-Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam
->> from fontTools import ttLib
->> tt = ttLib.TTFont("afont.ttf")
->> tt['maxp'].numGlyphs
-242
->> tt['OS/2'].achVendID
-'B&H\000'
->> tt['head'].unitsPerEm
-2048
->> tt.saveXML("afont.ttx")
-Dumping 'LTSH' table...
-Dumping 'OS/2' table...
-Dumping 'VDMX' table...
-Dumping 'cmap' table...
-Dumping 'cvt ' table...
-Dumping 'fpgm' table...
-Dumping 'glyf' table...
-Dumping 'hdmx' table...
-Dumping 'head' table...
-Dumping 'hhea' table...
-Dumping 'hmtx' table...
-Dumping 'loca' table...
-Dumping 'maxp' table...
-Dumping 'name' table...
-Dumping 'post' table...
-Dumping 'prep' table...
->> tt2 = ttLib.TTFont()
->> tt2.importXML("afont.ttx")
->> tt2['maxp'].numGlyphs
-242
->>
-
-"""
+"""fontTools.ttLib -- a package for dealing with TrueType fonts."""
from fontTools.misc.loggingTools import deprecateFunction
import logging
diff --git a/Lib/fontTools/ttLib/removeOverlaps.py b/Lib/fontTools/ttLib/removeOverlaps.py
index fb5c77ab..624cd47b 100644
--- a/Lib/fontTools/ttLib/removeOverlaps.py
+++ b/Lib/fontTools/ttLib/removeOverlaps.py
@@ -5,8 +5,9 @@ Requires https://github.com/fonttools/skia-pathops
import itertools
import logging
-from typing import Iterable, Optional, Mapping
+from typing import Callable, Iterable, Optional, Mapping
+from fontTools.misc.roundTools import otRound
from fontTools.ttLib import ttFont
from fontTools.ttLib.tables import _g_l_y_f
from fontTools.ttLib.tables import _h_m_t_x
@@ -18,6 +19,10 @@ import pathops
__all__ = ["removeOverlaps"]
+class RemoveOverlapsError(Exception):
+ pass
+
+
log = logging.getLogger("fontTools.ttLib.removeOverlaps")
_TTGlyphMapping = Mapping[str, ttFont._TTGlyph]
@@ -76,6 +81,49 @@ def ttfGlyphFromSkPath(path: pathops.Path) -> _g_l_y_f.Glyph:
return glyph
+def _round_path(
+ path: pathops.Path, round: Callable[[float], float] = otRound
+) -> pathops.Path:
+ rounded_path = pathops.Path()
+ for verb, points in path:
+ rounded_path.add(verb, *((round(p[0]), round(p[1])) for p in points))
+ return rounded_path
+
+
+def _simplify(path: pathops.Path, debugGlyphName: str) -> pathops.Path:
+ # skia-pathops has a bug where it sometimes fails to simplify paths when there
+ # are float coordinates and control points are very close to one another.
+ # Rounding coordinates to integers works around the bug.
+ # Since we are going to round glyf coordinates later on anyway, here it is
+ # ok(-ish) to also round before simplify. Better than failing the whole process
+ # for the entire font.
+ # https://bugs.chromium.org/p/skia/issues/detail?id=11958
+ # https://github.com/google/fonts/issues/3365
+ # TODO(anthrotype): remove once this Skia bug is fixed
+ try:
+ return pathops.simplify(path, clockwise=path.clockwise)
+ except pathops.PathOpsError:
+ pass
+
+ path = _round_path(path)
+ try:
+ path = pathops.simplify(path, clockwise=path.clockwise)
+ log.debug(
+ "skia-pathops failed to simplify '%s' with float coordinates, "
+ "but succeded using rounded integer coordinates",
+ debugGlyphName,
+ )
+ return path
+ except pathops.PathOpsError as e:
+ if log.isEnabledFor(logging.DEBUG):
+ path.dump()
+ raise RemoveOverlapsError(
+ f"Failed to remove overlaps from glyph {debugGlyphName!r}"
+ ) from e
+
+ raise AssertionError("Unreachable")
+
+
def removeTTGlyphOverlaps(
glyphName: str,
glyphSet: _TTGlyphMapping,
@@ -93,7 +141,7 @@ def removeTTGlyphOverlaps(
path = skPathFromGlyph(glyphName, glyphSet)
# remove overlaps
- path2 = pathops.simplify(path, clockwise=path.clockwise)
+ path2 = _simplify(path, glyphName)
# replace TTGlyph if simplified path is different (ignoring contour order)
if {tuple(c) for c in path.contours} != {tuple(c) for c in path2.contours}:
@@ -115,6 +163,7 @@ def removeOverlaps(
font: ttFont.TTFont,
glyphNames: Optional[Iterable[str]] = None,
removeHinting: bool = True,
+ ignoreErrors=False,
) -> None:
"""Simplify glyphs in TTFont by merging overlapping contours.
@@ -132,6 +181,8 @@ def removeOverlaps(
glyphNames: optional iterable of glyph names (str) to remove overlaps from.
By default, all glyphs in the font are processed.
removeHinting (bool): set to False to keep hinting for unmodified glyphs.
+ ignoreErrors (bool): set to True to ignore errors while removing overlaps,
+ thus keeping the tricky glyphs unchanged (fonttools/fonttools#2363).
"""
try:
glyfTable = font["glyf"]
@@ -159,10 +210,15 @@ def removeOverlaps(
)
modified = set()
for glyphName in glyphNames:
- if removeTTGlyphOverlaps(
- glyphName, glyphSet, glyfTable, hmtxTable, removeHinting
- ):
- modified.add(glyphName)
+ try:
+ if removeTTGlyphOverlaps(
+ glyphName, glyphSet, glyfTable, hmtxTable, removeHinting
+ ):
+ modified.add(glyphName)
+ except RemoveOverlapsError:
+ if not ignoreErrors:
+ raise
+ log.error("Failed to remove overlaps for '%s'", glyphName)
log.debug("Removed overlaps for %s glyphs:\n%s", len(modified), " ".join(modified))
diff --git a/Lib/fontTools/ttLib/sfnt.py b/Lib/fontTools/ttLib/sfnt.py
index d609dc51..e7c06337 100644
--- a/Lib/fontTools/ttLib/sfnt.py
+++ b/Lib/fontTools/ttLib/sfnt.py
@@ -8,13 +8,13 @@ Defines two public classes:
used automatically by ttLib.TTFont.)
The reading and writing of sfnt files is separated in two distinct
-classes, since whenever to number of tables changes or whenever
-a table's length chages you need to rewrite the whole file anyway.
+classes, since whenever the number of tables changes or whenever
+a table's length changes you need to rewrite the whole file anyway.
"""
from io import BytesIO
from types import SimpleNamespace
-from fontTools.misc.py23 import Tag
+from fontTools.misc.textTools import Tag
from fontTools.misc import sstruct
from fontTools.ttLib import TTLibError
import struct
@@ -571,9 +571,6 @@ class WOFFFlavorData():
def calcChecksum(data):
"""Calculate the checksum for an arbitrary block of data.
- Optionally takes a 'start' argument, which allows you to
- calculate a checksum in chunks by feeding it a previous
- result.
If the data length is not a multiple of four, it assumes
it is to be padded with null byte.
diff --git a/Lib/fontTools/ttLib/tables/C_B_D_T_.py b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
index 11bb60b8..adf5447f 100644
--- a/Lib/fontTools/ttLib/tables/C_B_D_T_.py
+++ b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
@@ -3,7 +3,7 @@
# Google Author(s): Matt Fontaine
-from fontTools.misc.py23 import bytesjoin
+from fontTools.misc.textTools import bytesjoin
from fontTools.misc import sstruct
from . import E_B_D_T_
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
diff --git a/Lib/fontTools/ttLib/tables/C_O_L_R_.py b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
index 4004d417..3528bf5b 100644
--- a/Lib/fontTools/ttLib/tables/C_O_L_R_.py
+++ b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
@@ -9,8 +9,10 @@ from . import DefaultTable
class table_C_O_L_R_(DefaultTable.DefaultTable):
""" This table is structured so that you can treat it like a dictionary keyed by glyph name.
- ttFont['COLR'][<glyphName>] will return the color layers for any glyph
- ttFont['COLR'][<glyphName>] = <value> will set the color layers for any glyph.
+
+ ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
+
+ ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
"""
@staticmethod
diff --git a/Lib/fontTools/ttLib/tables/C_P_A_L_.py b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
index c095095e..1ad342f1 100644
--- a/Lib/fontTools/ttLib/tables/C_P_A_L_.py
+++ b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
@@ -2,8 +2,7 @@
#
# Google Author(s): Behdad Esfahbod
-from fontTools.misc.py23 import bytesjoin
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytesjoin, safeEval
from . import DefaultTable
import array
from collections import namedtuple
diff --git a/Lib/fontTools/ttLib/tables/D_S_I_G_.py b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
index 1a520cab..02fddee6 100644
--- a/Lib/fontTools/ttLib/tables/D_S_I_G_.py
+++ b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
@@ -1,5 +1,4 @@
-from fontTools.misc.py23 import bytesjoin, strjoin, tobytes, tostr
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval
from fontTools.misc import sstruct
from . import DefaultTable
import base64
diff --git a/Lib/fontTools/ttLib/tables/DefaultTable.py b/Lib/fontTools/ttLib/tables/DefaultTable.py
index c70480a3..dae83183 100644
--- a/Lib/fontTools/ttLib/tables/DefaultTable.py
+++ b/Lib/fontTools/ttLib/tables/DefaultTable.py
@@ -1,4 +1,4 @@
-from fontTools.misc.py23 import Tag
+from fontTools.misc.textTools import Tag
from fontTools.ttLib import getClassTag
class DefaultTable(object):
diff --git a/Lib/fontTools/ttLib/tables/E_B_D_T_.py b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
index 5d9e7244..0bd2ab99 100644
--- a/Lib/fontTools/ttLib/tables/E_B_D_T_.py
+++ b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
@@ -1,6 +1,5 @@
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin, strjoin
from fontTools.misc import sstruct
-from fontTools.misc.textTools import safeEval, readHex, hexStr, deHexStr
+from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin, safeEval, readHex, hexStr, deHexStr
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
from . import DefaultTable
import itertools
diff --git a/Lib/fontTools/ttLib/tables/E_B_L_C_.py b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
index 94d40d96..cfdbca7b 100644
--- a/Lib/fontTools/ttLib/tables/E_B_L_C_.py
+++ b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
@@ -1,7 +1,6 @@
-from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from . import DefaultTable
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytesjoin, safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
diff --git a/Lib/fontTools/ttLib/tables/F__e_a_t.py b/Lib/fontTools/ttLib/tables/F__e_a_t.py
index 7e510614..a444c11d 100644
--- a/Lib/fontTools/ttLib/tables/F__e_a_t.py
+++ b/Lib/fontTools/ttLib/tables/F__e_a_t.py
@@ -11,6 +11,12 @@ Feat_hdr_format='''
'''
class table_F__e_a_t(DefaultTable.DefaultTable):
+ """The ``Feat`` table is used exclusively by the Graphite shaping engine
+ to store features and possible settings specified in GDL. Graphite features
+ determine what rules are applied to transform a glyph stream.
+
+ Not to be confused with ``feat``, or the OpenType Layout tables
+ ``GSUB``/``GPOS``."""
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
diff --git a/Lib/fontTools/ttLib/tables/G_M_A_P_.py b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
index 5b30dcfe..833890da 100644
--- a/Lib/fontTools/ttLib/tables/G_M_A_P_.py
+++ b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
@@ -1,6 +1,5 @@
-from fontTools.misc.py23 import tobytes, tostr
from fontTools.misc import sstruct
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import tobytes, tostr, safeEval
from . import DefaultTable
GMAPFormat = """
diff --git a/Lib/fontTools/ttLib/tables/G_P_K_G_.py b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
index 7598a62a..4f469c02 100644
--- a/Lib/fontTools/ttLib/tables/G_P_K_G_.py
+++ b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
@@ -1,6 +1,5 @@
-from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
-from fontTools.misc.textTools import safeEval, readHex
+from fontTools.misc.textTools import bytesjoin, safeEval, readHex
from . import DefaultTable
import sys
import array
diff --git a/Lib/fontTools/ttLib/tables/M_E_T_A_.py b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
index d4f6bc8c..990bfd2d 100644
--- a/Lib/fontTools/ttLib/tables/M_E_T_A_.py
+++ b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
@@ -1,6 +1,5 @@
-from fontTools.misc.py23 import byteord
from fontTools.misc import sstruct
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
import pdb
import struct
diff --git a/Lib/fontTools/ttLib/tables/S_I_N_G_.py b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
index dd9b63c4..73246df4 100644
--- a/Lib/fontTools/ttLib/tables/S_I_N_G_.py
+++ b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
@@ -1,6 +1,5 @@
-from fontTools.misc.py23 import bytechr, byteord, tobytes, tostr
from fontTools.misc import sstruct
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
from . import DefaultTable
SINGFormat = """
diff --git a/Lib/fontTools/ttLib/tables/S_V_G_.py b/Lib/fontTools/ttLib/tables/S_V_G_.py
index 135f2718..bc0e533d 100644
--- a/Lib/fontTools/ttLib/tables/S_V_G_.py
+++ b/Lib/fontTools/ttLib/tables/S_V_G_.py
@@ -1,10 +1,25 @@
-from fontTools.misc.py23 import bytesjoin, strjoin, tobytes, tostr
+"""Compiles/decompiles SVG table.
+
+https://docs.microsoft.com/en-us/typography/opentype/spec/svg
+
+The XML format is:
+
+.. code-block:: xml
+
+ <SVG>
+ <svgDoc endGlyphID="1" startGlyphID="1">
+ <![CDATA[ <complete SVG doc> ]]
+ </svgDoc>
+ ...
+ <svgDoc endGlyphID="n" startGlyphID="m">
+ <![CDATA[ <complete SVG doc> ]]
+ </svgDoc>
+ </SVG>
+"""
+
+from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr
from fontTools.misc import sstruct
from . import DefaultTable
-try:
- import xml.etree.cElementTree as ET
-except ImportError:
- import xml.etree.ElementTree as ET
from io import BytesIO
import struct
import logging
@@ -13,71 +28,15 @@ import logging
log = logging.getLogger(__name__)
-__doc__="""
-Compiles/decompiles version 0 and 1 SVG tables from/to XML.
-
-Version 1 is the first SVG definition, implemented in Mozilla before Aug 2013, now deprecated.
-This module will decompile this correctly, but will compile a version 1 table
-only if you add the secret element "<version1/>" to the SVG element in the TTF file.
-
-Version 0 is the joint Adobe-Mozilla proposal, which supports color palettes.
-
-The XML format is:
-<SVG>
- <svgDoc endGlyphID="1" startGlyphID="1">
- <![CDATA[ <complete SVG doc> ]]
- </svgDoc>
-...
- <svgDoc endGlyphID="n" startGlyphID="m">
- <![CDATA[ <complete SVG doc> ]]
- </svgDoc>
-
- <colorPalettes>
- <colorParamUINameID>n</colorParamUINameID>
- ...
- <colorParamUINameID>m</colorParamUINameID>
- <colorPalette uiNameID="n">
- <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
- ...
- <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
- </colorPalette>
- ...
- <colorPalette uiNameID="m">
- <colorRecord red="<int> green="<int>" blue="<int>" alpha="<int>" />
- ...
- <colorRecord red=<int>" green="<int>" blue="<int>" alpha="<int>" />
- </colorPalette>
- </colorPalettes>
-</SVG>
-
-Color values must be less than 256.
-
-The number of color records in each </colorPalette> must be the same as
-the number of <colorParamUINameID> elements.
-
-"""
-
-XML = ET.XML
-XMLElement = ET.Element
-xmlToString = ET.tostring
-
SVG_format_0 = """
> # big endian
version: H
offsetToSVGDocIndex: L
- offsetToColorPalettes: L
+ reserved: L
"""
SVG_format_0Size = sstruct.calcsize(SVG_format_0)
-SVG_format_1 = """
- > # big endian
- version: H
- numIndicies: H
-"""
-
-SVG_format_1Size = sstruct.calcsize(SVG_format_1)
-
doc_index_entry_format_0 = """
> # big endian
startGlyphID: H
@@ -88,84 +47,26 @@ doc_index_entry_format_0 = """
doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
-colorRecord_format_0 = """
- red: B
- green: B
- blue: B
- alpha: B
-"""
class table_S_V_G_(DefaultTable.DefaultTable):
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.colorPalettes = None
-
def decompile(self, data, ttFont):
- self.docList = None
- self.colorPalettes = None
- pos = 0
- self.version = struct.unpack(">H", data[pos:pos+2])[0]
-
- if self.version == 1:
- # This is pre-standardization version of the table; and obsolete. But we decompile it for now.
- # https://wiki.mozilla.org/SVGOpenTypeFonts
- self.decompile_format_1(data, ttFont)
- else:
- if self.version != 0:
- log.warning(
- "Unknown SVG table version '%s'. Decompiling as version 0.", self.version)
- # This is the standardized version of the table; and current.
- # https://www.microsoft.com/typography/otspec/svg.htm
- self.decompile_format_0(data, ttFont)
-
- def decompile_format_0(self, data, ttFont):
- dummy, data2 = sstruct.unpack2(SVG_format_0, data, self)
+ self.docList = []
+ # Version 0 is the standardized version of the table; and current.
+ # https://www.microsoft.com/typography/otspec/svg.htm
+ sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
+ if self.version != 0:
+ log.warning(
+ "Unknown SVG table version '%s'. Decompiling as version 0.", self.version)
# read in SVG Documents Index
- self.decompileEntryList(data)
-
- # read in colorPalettes table.
- self.colorPalettes = colorPalettes = ColorPalettes()
- pos = self.offsetToColorPalettes
- if pos > 0:
- colorPalettes.numColorParams = numColorParams = struct.unpack(">H", data[pos:pos+2])[0]
- if numColorParams > 0:
- colorPalettes.colorParamUINameIDs = colorParamUINameIDs = []
- pos = pos + 2
- for i in range(numColorParams):
- nameID = struct.unpack(">H", data[pos:pos+2])[0]
- colorParamUINameIDs.append(nameID)
- pos = pos + 2
-
- colorPalettes.numColorPalettes = numColorPalettes = struct.unpack(">H", data[pos:pos+2])[0]
- pos = pos + 2
- if numColorPalettes > 0:
- colorPalettes.colorPaletteList = colorPaletteList = []
- for i in range(numColorPalettes):
- colorPalette = ColorPalette()
- colorPaletteList.append(colorPalette)
- colorPalette.uiNameID = struct.unpack(">H", data[pos:pos+2])[0]
- pos = pos + 2
- colorPalette.paletteColors = paletteColors = []
- for j in range(numColorParams):
- colorRecord, colorPaletteData = sstruct.unpack2(colorRecord_format_0, data[pos:], ColorRecord())
- paletteColors.append(colorRecord)
- pos += 4
-
- def decompile_format_1(self, data, ttFont):
- self.offsetToSVGDocIndex = 2
- self.decompileEntryList(data)
-
- def decompileEntryList(self, data):
# data starts with the first entry of the entry list.
pos = subTableStart = self.offsetToSVGDocIndex
- self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0]
+ self.numEntries = struct.unpack(">H", data[pos:pos+2])[0]
pos += 2
if self.numEntries > 0:
data2 = data[pos:]
- self.docList = []
- self.entries = entries = []
+ entries = []
for i in range(self.numEntries):
docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry())
entries.append(docIndexEntry)
@@ -185,13 +86,6 @@ class table_S_V_G_(DefaultTable.DefaultTable):
self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] )
def compile(self, ttFont):
- if hasattr(self, "version1"):
- data = self.compileFormat1(ttFont)
- else:
- data = self.compileFormat0(ttFont)
- return data
-
- def compileFormat0(self, ttFont):
version = 0
offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header.
# get SGVDoc info.
@@ -201,8 +95,8 @@ class table_S_V_G_(DefaultTable.DefaultTable):
datum = struct.pack(">H",numEntries)
entryList.append(datum)
curOffset = len(datum) + doc_index_entry_format_0Size*numEntries
+ seenDocs = {}
for doc, startGlyphID, endGlyphID in self.docList:
- docOffset = curOffset
docBytes = tobytes(doc, encoding="utf_8")
if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"):
import gzip
@@ -214,63 +108,25 @@ class table_S_V_G_(DefaultTable.DefaultTable):
docBytes = gzipped
del gzipped, bytesIO
docLength = len(docBytes)
- curOffset += docLength
+ if docBytes in seenDocs:
+ docOffset = seenDocs[docBytes]
+ else:
+ docOffset = curOffset
+ curOffset += docLength
+ seenDocs[docBytes] = docOffset
+ docList.append(docBytes)
entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength)
entryList.append(entry)
- docList.append(docBytes)
entryList.extend(docList)
svgDocData = bytesjoin(entryList)
- # get colorpalette info.
- if self.colorPalettes is None:
- offsetToColorPalettes = 0
- palettesData = ""
- else:
- offsetToColorPalettes = SVG_format_0Size + len(svgDocData)
- dataList = []
- numColorParams = len(self.colorPalettes.colorParamUINameIDs)
- datum = struct.pack(">H", numColorParams)
- dataList.append(datum)
- for uiNameId in self.colorPalettes.colorParamUINameIDs:
- datum = struct.pack(">H", uiNameId)
- dataList.append(datum)
- numColorPalettes = len(self.colorPalettes.colorPaletteList)
- datum = struct.pack(">H", numColorPalettes)
- dataList.append(datum)
- for colorPalette in self.colorPalettes.colorPaletteList:
- datum = struct.pack(">H", colorPalette.uiNameID)
- dataList.append(datum)
- for colorRecord in colorPalette.paletteColors:
- data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha)
- dataList.append(data)
- palettesData = bytesjoin(dataList)
-
- header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes)
- data = [header, svgDocData, palettesData]
+ reserved = 0
+ header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
+ data = [header, svgDocData]
data = bytesjoin(data)
return data
- def compileFormat1(self, ttFont):
- version = 1
- numEntries = len(self.docList)
- header = struct.pack(">HH", version, numEntries)
- dataList = [header]
- docList = []
- curOffset = SVG_format_1Size + doc_index_entry_format_0Size*numEntries
- for doc, startGlyphID, endGlyphID in self.docList:
- docOffset = curOffset
- docBytes = tobytes(doc, encoding="utf_8")
- docLength = len(docBytes)
- curOffset += docLength
- entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength)
- dataList.append(entry)
- docList.append(docBytes)
- dataList.extend(docList)
- data = bytesjoin(dataList)
- return data
-
def toXML(self, writer, ttFont):
- writer.newline()
for doc, startGID, endGID in self.docList:
writer.begintag("svgDoc", startGlyphID=startGID, endGlyphID=endGID)
writer.newline()
@@ -279,33 +135,6 @@ class table_S_V_G_(DefaultTable.DefaultTable):
writer.endtag("svgDoc")
writer.newline()
- if (self.colorPalettes is not None) and (self.colorPalettes.numColorParams is not None):
- writer.begintag("colorPalettes")
- writer.newline()
- for uiNameID in self.colorPalettes.colorParamUINameIDs:
- writer.begintag("colorParamUINameID")
- writer._writeraw(str(uiNameID))
- writer.endtag("colorParamUINameID")
- writer.newline()
- for colorPalette in self.colorPalettes.colorPaletteList:
- writer.begintag("colorPalette", [("uiNameID", str(colorPalette.uiNameID))])
- writer.newline()
- for colorRecord in colorPalette.paletteColors:
- colorAttributes = [
- ("red", hex(colorRecord.red)),
- ("green", hex(colorRecord.green)),
- ("blue", hex(colorRecord.blue)),
- ("alpha", hex(colorRecord.alpha)),
- ]
- writer.begintag("colorRecord", colorAttributes)
- writer.endtag("colorRecord")
- writer.newline()
- writer.endtag("colorPalette")
- writer.newline()
-
- writer.endtag("colorPalettes")
- writer.newline()
-
def fromXML(self, name, attrs, content, ttFont):
if name == "svgDoc":
if not hasattr(self, "docList"):
@@ -315,14 +144,10 @@ class table_S_V_G_(DefaultTable.DefaultTable):
startGID = int(attrs["startGlyphID"])
endGID = int(attrs["endGlyphID"])
self.docList.append( [doc, startGID, endGID] )
- elif name == "colorPalettes":
- self.colorPalettes = ColorPalettes()
- self.colorPalettes.fromXML(name, attrs, content, ttFont)
- if self.colorPalettes.numColorParams == 0:
- self.colorPalettes = None
else:
log.warning("Unknown %s %s", name, content)
+
class DocumentIndexEntry(object):
def __init__(self):
self.startGlyphID = None # USHORT
@@ -332,55 +157,3 @@ class DocumentIndexEntry(object):
def __repr__(self):
return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength)
-
-class ColorPalettes(object):
- def __init__(self):
- self.numColorParams = None # USHORT
- self.colorParamUINameIDs = [] # list of name table name ID values that provide UI description of each color palette.
- self.numColorPalettes = None # USHORT
- self.colorPaletteList = [] # list of ColorPalette records
-
- def fromXML(self, name, attrs, content, ttFont):
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrib, content = element
- if name == "colorParamUINameID":
- uiNameID = int(content[0])
- self.colorParamUINameIDs.append(uiNameID)
- elif name == "colorPalette":
- colorPalette = ColorPalette()
- self.colorPaletteList.append(colorPalette)
- colorPalette.fromXML(name, attrib, content, ttFont)
-
- self.numColorParams = len(self.colorParamUINameIDs)
- self.numColorPalettes = len(self.colorPaletteList)
- for colorPalette in self.colorPaletteList:
- if len(colorPalette.paletteColors) != self.numColorParams:
- raise ValueError("Number of color records in a colorPalette ('%s') does not match the number of colorParamUINameIDs elements ('%s')." % (len(colorPalette.paletteColors), self.numColorParams))
-
-class ColorPalette(object):
- def __init__(self):
- self.uiNameID = None # USHORT. name table ID that describes user interface strings associated with this color palette.
- self.paletteColors = [] # list of ColorRecords
-
- def fromXML(self, name, attrs, content, ttFont):
- self.uiNameID = int(attrs["uiNameID"])
- for element in content:
- if isinstance(element, type("")):
- continue
- name, attrib, content = element
- if name == "colorRecord":
- colorRecord = ColorRecord()
- self.paletteColors.append(colorRecord)
- colorRecord.red = eval(attrib["red"])
- colorRecord.green = eval(attrib["green"])
- colorRecord.blue = eval(attrib["blue"])
- colorRecord.alpha = eval(attrib["alpha"])
-
-class ColorRecord(object):
- def __init__(self):
- self.red = 255 # all are one byte values.
- self.green = 255
- self.blue = 255
- self.alpha = 255
diff --git a/Lib/fontTools/ttLib/tables/S__i_l_f.py b/Lib/fontTools/ttLib/tables/S__i_l_f.py
index 95880b07..f326c386 100644
--- a/Lib/fontTools/ttLib/tables/S__i_l_f.py
+++ b/Lib/fontTools/ttLib/tables/S__i_l_f.py
@@ -1,7 +1,6 @@
-from fontTools.misc.py23 import byteord
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import byteord, safeEval
# from itertools import *
from . import DefaultTable
from . import grUtils
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_V_.py b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
index 80214452..c1e244c6 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_V_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
@@ -1,4 +1,4 @@
-from fontTools.misc.py23 import strjoin, tobytes, tostr
+from fontTools.misc.textTools import strjoin, tobytes, tostr
from . import asciiTable
class table_T_S_I_V_(asciiTable.asciiTable):
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__1.py b/Lib/fontTools/ttLib/tables/T_S_I__1.py
index 9ae7acd6..7f7608b2 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__1.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__1.py
@@ -4,9 +4,9 @@ tool to store its hinting source data.
TSI1 contains the text of the glyph programs in the form of low-level assembly
code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'.
"""
-from fontTools.misc.py23 import strjoin, tobytes, tostr
from . import DefaultTable
from fontTools.misc.loggingTools import LogMixin
+from fontTools.misc.textTools import strjoin, tobytes, tostr
class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
diff --git a/Lib/fontTools/ttLib/tables/TupleVariation.py b/Lib/fontTools/ttLib/tables/TupleVariation.py
index a63fb6c6..9c2895e4 100644
--- a/Lib/fontTools/ttLib/tables/TupleVariation.py
+++ b/Lib/fontTools/ttLib/tables/TupleVariation.py
@@ -1,4 +1,3 @@
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
@@ -8,6 +7,7 @@ from fontTools.misc.fixedTools import (
)
from fontTools.misc.textTools import safeEval
import array
+from collections import Counter, defaultdict
import io
import logging
import struct
@@ -38,7 +38,7 @@ class TupleVariation(object):
def __init__(self, axes, coordinates):
self.axes = axes.copy()
- self.coordinates = coordinates[:]
+ self.coordinates = list(coordinates)
def __repr__(self):
axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()]))
@@ -48,11 +48,12 @@ class TupleVariation(object):
return self.coordinates == other.coordinates and self.axes == other.axes
def getUsedPoints(self):
- result = set()
- for i, point in enumerate(self.coordinates):
- if point is not None:
- result.add(i)
- return result
+ # Empty set means "all points used".
+ if None not in self.coordinates:
+ return frozenset()
+ used = frozenset([i for i,p in enumerate(self.coordinates) if p is not None])
+ # Return None if no points used.
+ return used if used else None
def hasImpact(self):
"""Returns True if this TupleVariation has any visible impact.
@@ -126,15 +127,21 @@ class TupleVariation(object):
log.warning("bad delta format: %s" %
", ".join(sorted(attrs.keys())))
- def compile(self, axisTags, sharedCoordIndices, sharedPoints):
+ def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
+ assert set(self.axes.keys()) <= set(axisTags), ("Unknown axis tag found.", self.axes.keys(), axisTags)
+
tupleData = []
+ auxData = []
- assert all(tag in axisTags for tag in self.axes.keys()), ("Unknown axis tag found.", self.axes.keys(), axisTags)
+ if pointData is None:
+ usedPoints = self.getUsedPoints()
+ if usedPoints is None: # Nothing to encode
+ return b'', b''
+ pointData = self.compilePoints(usedPoints)
coord = self.compileCoord(axisTags)
- if coord in sharedCoordIndices:
- flags = sharedCoordIndices[coord]
- else:
+ flags = sharedCoordIndices.get(coord)
+ if flags is None:
flags = EMBEDDED_PEAK_TUPLE
tupleData.append(coord)
@@ -143,26 +150,27 @@ class TupleVariation(object):
flags |= INTERMEDIATE_REGION
tupleData.append(intermediateCoord)
- points = self.getUsedPoints()
- if sharedPoints == points:
- # Only use the shared points if they are identical to the actually used points
- auxData = self.compileDeltas(sharedPoints)
- usesSharedPoints = True
- else:
+ # pointData of b'' implies "use shared points".
+ if pointData:
flags |= PRIVATE_POINT_NUMBERS
- numPointsInGlyph = len(self.coordinates)
- auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points)
- usesSharedPoints = False
+ auxData.append(pointData)
- tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData)
- return (tupleData, auxData, usesSharedPoints)
+ auxData.append(self.compileDeltas())
+ auxData = b''.join(auxData)
+
+ tupleData.insert(0, struct.pack('>HH', len(auxData), flags))
+ return b''.join(tupleData), auxData
def compileCoord(self, axisTags):
- result = []
+ result = bytearray()
+ axes = self.axes
for axis in axisTags:
- _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
- result.append(struct.pack(">h", fl2fi(value, 14)))
- return bytesjoin(result)
+ triple = axes.get(axis)
+ if triple is None:
+ result.extend(b'\0\0')
+ else:
+ result.extend(struct.pack(">h", fl2fi(triple[1], 14)))
+ return bytes(result)
def compileIntermediateCoord(self, axisTags):
needed = False
@@ -175,13 +183,13 @@ class TupleVariation(object):
break
if not needed:
return None
- minCoords = []
- maxCoords = []
+ minCoords = bytearray()
+ maxCoords = bytearray()
for axis in axisTags:
minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
- minCoords.append(struct.pack(">h", fl2fi(minValue, 14)))
- maxCoords.append(struct.pack(">h", fl2fi(maxValue, 14)))
- return bytesjoin(minCoords + maxCoords)
+ minCoords.extend(struct.pack(">h", fl2fi(minValue, 14)))
+ maxCoords.extend(struct.pack(">h", fl2fi(maxValue, 14)))
+ return minCoords + maxCoords
@staticmethod
def decompileCoord_(axisTags, data, offset):
@@ -193,11 +201,15 @@ class TupleVariation(object):
return coord, pos
@staticmethod
- def compilePoints(points, numPointsInGlyph):
+ def compilePoints(points):
# If the set consists of all points in the glyph, it gets encoded with
# a special encoding: a single zero byte.
- if len(points) == numPointsInGlyph:
- return b"\0"
+ #
+	# To use this optimization, the points passed in must be an empty set.
+	# The following two lines are not strictly necessary as the main code
+	# below would emit the same bytes; but this case is the most common, and handling it here is faster.
+ if not points:
+ return b'\0'
# In the 'gvar' table, the packing of point numbers is a little surprising.
# It consists of multiple runs, each being a delta-encoded list of integers.
@@ -209,19 +221,24 @@ class TupleVariation(object):
points.sort()
numPoints = len(points)
+ result = bytearray()
# The binary representation starts with the total number of points in the set,
# encoded into one or two bytes depending on the value.
if numPoints < 0x80:
- result = [bytechr(numPoints)]
+ result.append(numPoints)
else:
- result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)]
+ result.append((numPoints >> 8) | 0x80)
+ result.append(numPoints & 0xff)
MAX_RUN_LENGTH = 127
pos = 0
lastValue = 0
while pos < numPoints:
- run = io.BytesIO()
runLength = 0
+
+ headerPos = len(result)
+ result.append(0)
+
useByteEncoding = None
while pos < numPoints and runLength <= MAX_RUN_LENGTH:
curValue = points[pos]
@@ -234,38 +251,36 @@ class TupleVariation(object):
# TODO This never switches back to a byte-encoding from a short-encoding.
# That's suboptimal.
if useByteEncoding:
- run.write(bytechr(delta))
+ result.append(delta)
else:
- run.write(bytechr(delta >> 8))
- run.write(bytechr(delta & 0xff))
+ result.append(delta >> 8)
+ result.append(delta & 0xff)
lastValue = curValue
pos += 1
runLength += 1
if useByteEncoding:
- runHeader = bytechr(runLength - 1)
+ result[headerPos] = runLength - 1
else:
- runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS)
- result.append(runHeader)
- result.append(run.getvalue())
+ result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS
- return bytesjoin(result)
+ return result
@staticmethod
def decompilePoints_(numPoints, data, offset, tableTag):
"""(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
assert tableTag in ('cvar', 'gvar')
pos = offset
- numPointsInData = byteord(data[pos])
+ numPointsInData = data[pos]
pos += 1
if (numPointsInData & POINTS_ARE_WORDS) != 0:
- numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos])
+ numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
pos += 1
if numPointsInData == 0:
return (range(numPoints), pos)
result = []
while len(result) < numPointsInData:
- runHeader = byteord(data[pos])
+ runHeader = data[pos]
pos += 1
numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
point = 0
@@ -298,23 +313,28 @@ class TupleVariation(object):
(",".join(sorted(badPoints)), tableTag))
return (result, pos)
- def compileDeltas(self, points):
+ def compileDeltas(self):
deltaX = []
deltaY = []
- for p in sorted(list(points)):
- c = self.coordinates[p]
- if type(c) is tuple and len(c) == 2:
+ if self.getCoordWidth() == 2:
+ for c in self.coordinates:
+ if c is None:
+ continue
deltaX.append(c[0])
deltaY.append(c[1])
- elif type(c) is int:
+ else:
+ for c in self.coordinates:
+ if c is None:
+ continue
deltaX.append(c)
- elif c is not None:
- raise TypeError("invalid type of delta: %s" % type(c))
- return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY)
+ bytearr = bytearray()
+ self.compileDeltaValues_(deltaX, bytearr)
+ self.compileDeltaValues_(deltaY, bytearr)
+ return bytearr
@staticmethod
- def compileDeltaValues_(deltas):
- """[value1, value2, value3, ...] --> bytestring
+ def compileDeltaValues_(deltas, bytearr=None):
+ """[value1, value2, value3, ...] --> bytearray
Emits a sequence of runs. Each run starts with a
byte-sized header whose 6 least significant bits
@@ -329,38 +349,41 @@ class TupleVariation(object):
bytes; if (header & 0x40) is set, the delta values are
signed 16-bit integers.
""" # Explaining the format because the 'gvar' spec is hard to understand.
- stream = io.BytesIO()
+ if bytearr is None:
+ bytearr = bytearray()
pos = 0
- while pos < len(deltas):
+ numDeltas = len(deltas)
+ while pos < numDeltas:
value = deltas[pos]
if value == 0:
- pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream)
- elif value >= -128 and value <= 127:
- pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, stream)
+ pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
+ elif -128 <= value <= 127:
+ pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
else:
- pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, stream)
- return stream.getvalue()
+ pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
+ return bytearr
@staticmethod
- def encodeDeltaRunAsZeroes_(deltas, offset, stream):
- runLength = 0
+ def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
pos = offset
numDeltas = len(deltas)
- while pos < numDeltas and runLength < 64 and deltas[pos] == 0:
+ while pos < numDeltas and deltas[pos] == 0:
pos += 1
- runLength += 1
- assert runLength >= 1 and runLength <= 64
- stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1)))
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(DELTAS_ARE_ZERO | 63)
+ runLength -= 64
+ if runLength:
+ bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
return pos
@staticmethod
- def encodeDeltaRunAsBytes_(deltas, offset, stream):
- runLength = 0
+ def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
pos = offset
numDeltas = len(deltas)
- while pos < numDeltas and runLength < 64:
+ while pos < numDeltas:
value = deltas[pos]
- if value < -128 or value > 127:
+ if not (-128 <= value <= 127):
break
# Within a byte-encoded run of deltas, a single zero
# is best stored literally as 0x00 value. However,
@@ -373,19 +396,22 @@ class TupleVariation(object):
if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0:
break
pos += 1
- runLength += 1
- assert runLength >= 1 and runLength <= 64
- stream.write(bytechr(runLength - 1))
- for i in range(offset, pos):
- stream.write(struct.pack('b', otRound(deltas[i])))
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(63)
+ bytearr.extend(array.array('b', deltas[offset:offset+64]))
+ offset += 64
+ runLength -= 64
+ if runLength:
+ bytearr.append(runLength - 1)
+ bytearr.extend(array.array('b', deltas[offset:pos]))
return pos
@staticmethod
- def encodeDeltaRunAsWords_(deltas, offset, stream):
- runLength = 0
+ def encodeDeltaRunAsWords_(deltas, offset, bytearr):
pos = offset
numDeltas = len(deltas)
- while pos < numDeltas and runLength < 64:
+ while pos < numDeltas:
value = deltas[pos]
# Within a word-encoded run of deltas, it is easiest
# to start a new run (with a different encoding)
@@ -403,15 +429,22 @@ class TupleVariation(object):
# [0x6666, 2, 0x7777] becomes 7 bytes when storing
# the value literally (42 66 66 00 02 77 77), but 8 bytes
# when starting a new run (40 66 66 00 02 40 77 77).
- isByteEncodable = lambda value: value >= -128 and value <= 127
- if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]):
+ if (-128 <= value <= 127) and pos+1 < numDeltas and (-128 <= deltas[pos+1] <= 127):
break
pos += 1
- runLength += 1
- assert runLength >= 1 and runLength <= 64
- stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1)))
- for i in range(offset, pos):
- stream.write(struct.pack('>h', otRound(deltas[i])))
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(DELTAS_ARE_WORDS | 63)
+ a = array.array('h', deltas[offset:offset+64])
+ if sys.byteorder != "big": a.byteswap()
+ bytearr.extend(a)
+ offset += 64
+ runLength -= 64
+ if runLength:
+ bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
+ a = array.array('h', deltas[offset:pos])
+ if sys.byteorder != "big": a.byteswap()
+ bytearr.extend(a)
return pos
@staticmethod
@@ -420,7 +453,7 @@ class TupleVariation(object):
result = []
pos = offset
while len(result) < numDeltas:
- runHeader = byteord(data[pos])
+ runHeader = data[pos]
pos += 1
numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
if (runHeader & DELTAS_ARE_ZERO) != 0:
@@ -523,9 +556,9 @@ class TupleVariation(object):
# Shouldn't matter that this is different from fvar...?
axisTags = sorted(self.axes.keys())
- tupleData, auxData, _ = self.compile(axisTags, [], None)
+ tupleData, auxData = self.compile(axisTags)
unoptimizedLength = len(tupleData) + len(auxData)
- tupleData, auxData, _ = varOpt.compile(axisTags, [], None)
+ tupleData, auxData = varOpt.compile(axisTags)
optimizedLength = len(tupleData) + len(auxData)
if optimizedLength < unoptimizedLength:
@@ -577,87 +610,77 @@ def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
return result
-def compileSharedTuples(axisTags, variations):
- coordCount = {}
+def compileSharedTuples(axisTags, variations,
+ MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1):
+ coordCount = Counter()
for var in variations:
coord = var.compileCoord(axisTags)
- coordCount[coord] = coordCount.get(coord, 0) + 1
- sharedCoords = [(count, coord)
- for (coord, count) in coordCount.items() if count > 1]
- sharedCoords.sort(reverse=True)
- MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1
- sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS]
- return [c[1] for c in sharedCoords] # Strip off counts.
+ coordCount[coord] += 1
+ # In python < 3.7, most_common() ordering is non-deterministic
+ # so apply a sort to make sure the ordering is consistent.
+ sharedCoords = sorted(
+ coordCount.most_common(MAX_NUM_SHARED_COORDS),
+ key=lambda item: (-item[1], item[0]),
+ )
+ return [c[0] for c in sharedCoords if c[1] > 1]
def compileTupleVariationStore(variations, pointCount,
axisTags, sharedTupleIndices,
useSharedPoints=True):
- variations = [v for v in variations if v.hasImpact()]
- if len(variations) == 0:
- return (0, b"", b"")
+ newVariations = []
+ pointDatas = []
+ # Compile all points and figure out sharing if desired
+ sharedPoints = None
- # Each glyph variation tuples modifies a set of control points. To
- # indicate which exact points are getting modified, a single tuple
- # can either refer to a shared set of points, or the tuple can
- # supply its private point numbers. Because the impact of sharing
- # can be positive (no need for a private point list) or negative
- # (need to supply 0,0 deltas for unused points), it is not obvious
- # how to determine which tuples should take their points from the
- # shared pool versus have their own. Perhaps we should resort to
- # brute force, and try all combinations? However, if a glyph has n
- # variation tuples, we would need to try 2^n combinations (because
- # each tuple may or may not be part of the shared set). How many
- # variations tuples do glyphs have?
- #
- # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3}
- # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1}
- # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 8}
- # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples).
- #
-
- # Is this even worth optimizing? If we never use a shared point
- # list, the private lists will consume 112K for Skia, 5K for
- # BuffaloGalRegular, and 15K for JamRegular. If we always use a
- # shared point list, the shared lists will consume 16K for Skia,
- # 3K for BuffaloGalRegular, and 10K for JamRegular. However, in
- # the latter case the delta arrays will become larger, but I
- # haven't yet measured by how much. From gut feeling (which may be
- # wrong), the optimum is to share some but not all points;
- # however, then we would need to try all combinations.
- #
- # For the time being, we try two variants and then pick the better one:
- # (a) each tuple supplies its own private set of points;
- # (b) all tuples refer to a shared set of points, which consists of
- # "every control point in the glyph that has explicit deltas".
- usedPoints = set()
+ # Collect, count, and compile point-sets for all variation sets
+ pointSetCount = defaultdict(int)
for v in variations:
- usedPoints |= v.getUsedPoints()
+ points = v.getUsedPoints()
+ if points is None: # Empty variations
+ continue
+ pointSetCount[points] += 1
+ newVariations.append(v)
+ pointDatas.append(points)
+ variations = newVariations
+ del newVariations
+
+ if not variations:
+ return (0, b"", b"")
+
+ n = len(variations[0].coordinates)
+ assert all(len(v.coordinates) == n for v in variations), "Variation sets have different sizes"
+
+ compiledPoints = {pointSet:TupleVariation.compilePoints(pointSet)
+ for pointSet in pointSetCount}
+
+ tupleVariationCount = len(variations)
tuples = []
data = []
- someTuplesSharePoints = False
- sharedPointVariation = None # To keep track of a variation that uses shared points
- for v in variations:
- privateTuple, privateData, _ = v.compile(
- axisTags, sharedTupleIndices, sharedPoints=None)
- sharedTuple, sharedData, usesSharedPoints = v.compile(
- axisTags, sharedTupleIndices, sharedPoints=usedPoints)
- if useSharedPoints and (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)):
- tuples.append(sharedTuple)
- data.append(sharedData)
- someTuplesSharePoints |= usesSharedPoints
- sharedPointVariation = v
- else:
- tuples.append(privateTuple)
- data.append(privateData)
- if someTuplesSharePoints:
- # Use the last of the variations that share points for compiling the packed point data
- data = sharedPointVariation.compilePoints(usedPoints, len(sharedPointVariation.coordinates)) + bytesjoin(data)
- tupleVariationCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples)
- else:
- data = bytesjoin(data)
- tupleVariationCount = len(tuples)
- tuples = bytesjoin(tuples)
+
+ if useSharedPoints:
+ # Find point-set which saves most bytes.
+ def key(pn):
+ pointSet = pn[0]
+ count = pn[1]
+ return len(compiledPoints[pointSet]) * (count - 1)
+ sharedPoints = max(pointSetCount.items(), key=key)[0]
+
+ data.append(compiledPoints[sharedPoints])
+ tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS
+
+ # b'' implies "use shared points"
+ pointDatas = [compiledPoints[points] if points != sharedPoints else b''
+ for points in pointDatas]
+
+ for v,p in zip(variations, pointDatas):
+ thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)
+
+ tuples.append(thisTuple)
+ data.append(thisData)
+
+ tuples = b''.join(tuples)
+ data = b''.join(data)
return tupleVariationCount, tuples, data
diff --git a/Lib/fontTools/ttLib/tables/V_O_R_G_.py b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
index 0b7fe959..e03e164b 100644
--- a/Lib/fontTools/ttLib/tables/V_O_R_G_.py
+++ b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
@@ -1,14 +1,15 @@
-from fontTools.misc.py23 import bytesjoin
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytesjoin, safeEval
from . import DefaultTable
import struct
class table_V_O_R_G_(DefaultTable.DefaultTable):
- """ This table is structured so that you can treat it like a dictionary keyed by glyph name.
- ttFont['VORG'][<glyphName>] will return the vertical origin for any glyph
- ttFont['VORG'][<glyphName>] = <value> will set the vertical origin for any glyph.
+ """This table is structured so that you can treat it like a dictionary keyed by glyph name.
+
+ ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.
+
+ ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph.
"""
def decompile(self, data, ttFont):
diff --git a/Lib/fontTools/ttLib/tables/_a_n_k_r.py b/Lib/fontTools/ttLib/tables/_a_n_k_r.py
index 1f2946c2..16f5c184 100644
--- a/Lib/fontTools/ttLib/tables/_a_n_k_r.py
+++ b/Lib/fontTools/ttLib/tables/_a_n_k_r.py
@@ -1,11 +1,12 @@
from .otBase import BaseTTXConverter
-
-# The anchor point table provides a way to define anchor points.
-# These are points within the coordinate space of a given glyph,
-# independent of the control points used to render the glyph.
-# Anchor points are used in conjunction with the 'kerx' table.
-#
-# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
class table__a_n_k_r(BaseTTXConverter):
+ """
+ The anchor point table provides a way to define anchor points.
+ These are points within the coordinate space of a given glyph,
+ independent of the control points used to render the glyph.
+ Anchor points are used in conjunction with the 'kerx' table.
+
+ See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
+ """
pass
diff --git a/Lib/fontTools/ttLib/tables/_a_v_a_r.py b/Lib/fontTools/ttLib/tables/_a_v_a_r.py
index 2b6a40ed..16f2a219 100644
--- a/Lib/fontTools/ttLib/tables/_a_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_a_v_a_r.py
@@ -1,4 +1,3 @@
-from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
@@ -6,6 +5,7 @@ from fontTools.misc.fixedTools import (
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
+from fontTools.misc.textTools import bytesjoin
from fontTools.ttLib import TTLibError
from . import DefaultTable
import struct
@@ -28,6 +28,28 @@ assert sstruct.calcsize(AVAR_HEADER_FORMAT) == 8, sstruct.calcsize(AVAR_HEADER_F
class table__a_v_a_r(DefaultTable.DefaultTable):
+ """Axis Variations Table
+
+ This class represents the ``avar`` table of a variable font. The object has one
+ substantive attribute, ``segments``, which maps axis tags to a segments dictionary::
+
+ >>> font["avar"].segments # doctest: +SKIP
+ {'wght': {-1.0: -1.0,
+ 0.0: 0.0,
+ 0.125: 0.11444091796875,
+ 0.25: 0.23492431640625,
+ 0.5: 0.35540771484375,
+ 0.625: 0.5,
+ 0.75: 0.6566162109375,
+ 0.875: 0.81927490234375,
+ 1.0: 1.0},
+ 'ital': {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}}
+
+ Notice that the segments dictionary is made up of normalized values. A valid
+ ``avar`` segment mapping must contain the entries ``-1.0: -1.0, 0.0: 0.0, 1.0: 1.0``.
+ fontTools does not enforce this, so it is your responsibility to ensure that
+ mappings are valid.
+ """
dependencies = ["fvar"]
diff --git a/Lib/fontTools/ttLib/tables/_c_i_d_g.py b/Lib/fontTools/ttLib/tables/_c_i_d_g.py
index de83d4d6..2517e785 100644
--- a/Lib/fontTools/ttLib/tables/_c_i_d_g.py
+++ b/Lib/fontTools/ttLib/tables/_c_i_d_g.py
@@ -2,17 +2,18 @@
from .otBase import BaseTTXConverter
-# The AAT ‘cidg’ table has almost the same structure as ‘gidc’,
-# just mapping CIDs to GlyphIDs instead of the reverse direction.
-#
-# It is useful for fonts that may be used by a PDF renderer in lieu of
-# a font reference with a known glyph collection but no subsetted
-# glyphs. For instance, a PDF can say “please use a font conforming
-# to Adobe-Japan-1”; the ‘cidg’ mapping is necessary if the font is,
-# say, a TrueType font. ‘gidc’ is lossy for this purpose and is
-# obsoleted by ‘cidg’.
-#
-# For example, the first font in /System/Library/Fonts/PingFang.ttc
-# (which Apple ships pre-installed on MacOS 10.12.6) has a ‘cidg’ table.
class table__c_i_d_g(BaseTTXConverter):
+ """The AAT ``cidg`` table has almost the same structure as ``gidc``,
+just mapping CIDs to GlyphIDs instead of the reverse direction.
+
+It is useful for fonts that may be used by a PDF renderer in lieu of
+a font reference with a known glyph collection but no subsetted
+glyphs. For instance, a PDF can say “please use a font conforming
+to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
+say, a TrueType font. ``gidc`` is lossy for this purpose and is
+obsoleted by ``cidg``.
+
+For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
+(which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table.
+"""
pass
diff --git a/Lib/fontTools/ttLib/tables/_c_m_a_p.py b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
index a65a0c25..a31b5059 100644
--- a/Lib/fontTools/ttLib/tables/_c_m_a_p.py
+++ b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
@@ -1,5 +1,4 @@
-from fontTools.misc.py23 import bytesjoin
-from fontTools.misc.textTools import safeEval, readHex
+from fontTools.misc.textTools import bytesjoin, safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
@@ -15,21 +14,61 @@ log = logging.getLogger(__name__)
def _make_map(font, chars, gids):
assert len(chars) == len(gids)
+ glyphNames = font.getGlyphNameMany(gids)
cmap = {}
- glyphOrder = font.getGlyphOrder()
- for char,gid in zip(chars,gids):
+ for char,gid,name in zip(chars,gids,glyphNames):
if gid == 0:
continue
- try:
- name = glyphOrder[gid]
- except IndexError:
- name = font.getGlyphName(gid)
cmap[char] = name
return cmap
class table__c_m_a_p(DefaultTable.DefaultTable):
+ """Character to Glyph Index Mapping Table
+
+ This class represents the `cmap <https://docs.microsoft.com/en-us/typography/opentype/spec/cmap>`_
+ table, which maps between input characters (in Unicode or other system encodings)
+ and glyphs within the font. The ``cmap`` table contains one or more subtables
+ which determine the mapping of characters to glyphs across different platforms
+ and encoding systems.
+
+ ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access
+ to the subtables, although it is normally easier to retrieve individual subtables
+ through the utility methods described below. To add new subtables to a font,
+ first determine the subtable format (if in doubt use format 4 for glyphs within
+ the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation
+ Sequences) construct subtable objects with ``CmapSubtable.newSubtable(format)``,
+ and append them to the ``.tables`` list.
+
+ Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap``
+ attribute.
+
+ Example::
+
+ cmap4_0_3 = CmapSubtable.newSubtable(4)
+ cmap4_0_3.platformID = 0
+ cmap4_0_3.platEncID = 3
+ cmap4_0_3.language = 0
+ cmap4_0_3.cmap = { 0xC1: "Aacute" }
+
+ cmap = newTable("cmap")
+ cmap.tableVersion = 0
+ cmap.tables = [cmap4_0_3]
+ """
def getcmap(self, platformID, platEncID):
+ """Returns the first subtable which matches the given platform and encoding.
+
+ Args:
+ platformID (int): The platform ID. Use 0 for Unicode, 1 for Macintosh
+ (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows.
+ platEncID (int): Encoding ID. Interpretation depends on the platform ID.
+ See the OpenType specification for details.
+
+ Returns:
+ An object which is a subclass of :py:class:`CmapSubtable` if a matching
+ subtable is found within the font, or ``None`` otherwise.
+ """
+
for subtable in self.tables:
if (subtable.platformID == platformID and
subtable.platEncID == platEncID):
@@ -37,13 +76,22 @@ class table__c_m_a_p(DefaultTable.DefaultTable):
return None # not found
def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
- """Return the 'best' unicode cmap dictionary available in the font,
- or None, if no unicode cmap subtable is available.
+ """Returns the 'best' Unicode cmap dictionary available in the font
+ or ``None``, if no Unicode cmap subtable is available.
By default it will search for the following (platformID, platEncID)
- pairs:
- (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0)
- This can be customized via the cmapPreferences argument.
+ pairs in order::
+
+ (3, 10), # Windows Unicode full repertoire
+ (0, 6), # Unicode full repertoire (format 13 subtable)
+ (0, 4), # Unicode 2.0 full repertoire
+ (3, 1), # Windows Unicode BMP
+ (0, 3), # Unicode 2.0 BMP
+ (0, 2), # Unicode ISO/IEC 10646
+ (0, 1), # Unicode 1.1
+ (0, 0) # Unicode 1.0
+
+ This order can be customized via the ``cmapPreferences`` argument.
"""
for platformID, platEncID in cmapPreferences:
cmapSubtable = self.getcmap(platformID, platEncID)
@@ -52,12 +100,20 @@ class table__c_m_a_p(DefaultTable.DefaultTable):
return None # None of the requested cmap subtables were found
def buildReversed(self):
- """Returns a reverse cmap such as {'one':{0x31}, 'A':{0x41,0x391}}.
+ """Builds a reverse mapping dictionary
+
+ Iterates over all Unicode cmap tables and returns a dictionary mapping
+ glyphs to sets of codepoints, such as::
+
+ {
+ 'one': {0x31},
+ 'A': {0x41,0x391}
+ }
The values are sets of Unicode codepoints because
some fonts map different codepoints to the same glyph.
- For example, U+0041 LATIN CAPITAL LETTER A and U+0391
- GREEK CAPITAL LETTER ALPHA are sometimes the same glyph.
+ For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391
+ GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph.
"""
result = {}
for subtable in self.tables:
@@ -100,6 +156,12 @@ class table__c_m_a_p(DefaultTable.DefaultTable):
else:
seenOffsets[offset] = i
tables.append(table)
+ if ttFont.lazy is False: # Be lazy for None and True
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self):
+ for st in self.tables:
+ st.ensureDecompiled()
def compile(self, ttFont):
self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
@@ -145,6 +207,16 @@ class table__c_m_a_p(DefaultTable.DefaultTable):
class CmapSubtable(object):
+ """Base class for all cmap subtable formats.
+
+ Subclasses which handle the individual subtable formats are named
+ ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass`
+ to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a
+ new subtable object for a given format.
+
+ The object exposes a ``.cmap`` attribute, which contains a dictionary mapping
+ character codepoints to glyph names.
+ """
@staticmethod
def getSubtableClass(format):
@@ -153,7 +225,8 @@ class CmapSubtable(object):
@staticmethod
def newSubtable(format):
- """Return a new instance of a subtable for format."""
+ """Return a new instance of a subtable
+ for the given format."""
subtableClass = CmapSubtable.getSubtableClass(format)
return subtableClass(format)
@@ -161,6 +234,17 @@ class CmapSubtable(object):
self.format = format
self.data = None
self.ttFont = None
+ self.platformID = None #: The platform ID of this subtable
+ self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``)
+ self.language = None #: The language ID of this subtable (Macintosh platform only)
+
+ def ensureDecompiled(self):
+ if self.data is None:
+ return
+ self.decompile(None, None) # use saved data.
+ self.data = None # Once this table has been decompiled, make sure we don't
+ # just return the original data. Also avoids recursion when
+ # called with an attribute that the cmap subtable doesn't have.
def __getattr__(self, attr):
# allow lazy decompilation of subtables.
@@ -168,10 +252,7 @@ class CmapSubtable(object):
raise AttributeError(attr)
if self.data is None:
raise AttributeError(attr)
- self.decompile(None, None) # use saved data.
- self.data = None # Once this table has been decompiled, make sure we don't
- # just return the original data. Also avoids recursion when
- # called with an attribute that the cmap subtable doesn't have.
+ self.ensureDecompiled()
return getattr(self, attr)
def decompileHeader(self, data, ttFont):
@@ -198,20 +279,22 @@ class CmapSubtable(object):
def getEncoding(self, default=None):
"""Returns the Python encoding name for this cmap subtable based on its platformID,
platEncID, and language. If encoding for these values is not known, by default
- None is returned. That can be overriden by passing a value to the default
+ ``None`` is returned. That can be overridden by passing a value to the ``default``
argument.
Note that if you want to choose a "preferred" cmap subtable, most of the time
- self.isUnicode() is what you want as that one only returns true for the modern,
+ ``self.isUnicode()`` is what you want as that one only returns true for the modern,
commonly used, Unicode-compatible triplets, not the legacy ones.
"""
return getEncoding(self.platformID, self.platEncID, self.language, default)
def isUnicode(self):
+ """Returns true if the characters are interpreted as Unicode codepoints."""
return (self.platformID == 0 or
(self.platformID == 3 and self.platEncID in [0, 1, 10]))
def isSymbol(self):
+ """Returns true if the subtable is for the Symbol encoding (3,0)"""
return self.platformID == 3 and self.platEncID == 0
def _writeCodes(self, codes, writer):
diff --git a/Lib/fontTools/ttLib/tables/_c_v_a_r.py b/Lib/fontTools/ttLib/tables/_c_v_a_r.py
index 09b2c16c..a67efe02 100644
--- a/Lib/fontTools/ttLib/tables/_c_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_c_v_a_r.py
@@ -1,6 +1,6 @@
-from fontTools.misc.py23 import bytesjoin
from . import DefaultTable
from fontTools.misc import sstruct
+from fontTools.misc.textTools import bytesjoin
from fontTools.ttLib.tables.TupleVariation import \
compileTupleVariationStore, decompileTupleVariationStore, TupleVariation
@@ -41,7 +41,7 @@ class table__c_v_a_r(DefaultTable.DefaultTable):
"tupleVariationCount": tupleVariationCount,
"offsetToData": CVAR_HEADER_SIZE + len(tuples),
}
- return bytesjoin([
+ return b''.join([
sstruct.pack(CVAR_HEADER_FORMAT, header),
tuples,
data
diff --git a/Lib/fontTools/ttLib/tables/_f_e_a_t.py b/Lib/fontTools/ttLib/tables/_f_e_a_t.py
index eb03f8ba..079b514c 100644
--- a/Lib/fontTools/ttLib/tables/_f_e_a_t.py
+++ b/Lib/fontTools/ttLib/tables/_f_e_a_t.py
@@ -2,4 +2,10 @@ from .otBase import BaseTTXConverter
class table__f_e_a_t(BaseTTXConverter):
+ """The feature name table is an AAT (Apple Advanced Typography) table for
+ storing font features, settings, and their human-readable names. It should
+ not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS``
+ tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_
+ in the TrueType Reference Manual for more information on the structure and
+ purpose of this table."""
pass
diff --git a/Lib/fontTools/ttLib/tables/_f_v_a_r.py b/Lib/fontTools/ttLib/tables/_f_v_a_r.py
index 7487da62..d7409195 100644
--- a/Lib/fontTools/ttLib/tables/_f_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_f_v_a_r.py
@@ -1,4 +1,3 @@
-from fontTools.misc.py23 import Tag, bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
@@ -6,7 +5,7 @@ from fontTools.misc.fixedTools import (
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import Tag, bytesjoin, safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
import struct
diff --git a/Lib/fontTools/ttLib/tables/_g_l_y_f.py b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
index 4680ddbf..14c4519d 100644
--- a/Lib/fontTools/ttLib/tables/_g_l_y_f.py
+++ b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
@@ -1,12 +1,11 @@
"""_g_l_y_f.py -- Converter classes for the 'glyf' table."""
from collections import namedtuple
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin, tostr
from fontTools.misc import sstruct
from fontTools import ttLib
from fontTools import version
-from fontTools.misc.textTools import safeEval, pad
-from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect
+from fontTools.misc.textTools import tostr, safeEval, pad
+from fontTools.misc.arrayTools import calcIntBounds, pointInRect
from fontTools.misc.bezierTools import calcQuadraticBounds
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
@@ -25,6 +24,7 @@ import logging
import os
from fontTools.misc import xmlWriter
from fontTools.misc.filenames import userNameToFileName
+from fontTools.misc.loggingTools import deprecateFunction
log = logging.getLogger(__name__)
@@ -47,6 +47,35 @@ SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple
class table__g_l_y_f(DefaultTable.DefaultTable):
+ """Glyph Data Table
+
+ This class represents the `glyf <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf>`_
+ table, which contains outlines for glyphs in TrueType format. In many cases,
+ it is easier to access and manipulate glyph outlines through the ``GlyphSet``
+ object returned from :py:meth:`fontTools.ttLib.ttFont.getGlyphSet`::
+
+ >> from fontTools.pens.boundsPen import BoundsPen
+ >> glyphset = font.getGlyphSet()
+ >> bp = BoundsPen(glyphset)
+ >> glyphset["A"].draw(bp)
+ >> bp.bounds
+ (19, 0, 633, 716)
+
+ However, this class can be used for low-level access to the ``glyf`` table data.
+ Objects of this class support dictionary-like access, mapping glyph names to
+ :py:class:`Glyph` objects::
+
+ >> glyf = font["glyf"]
+ >> len(glyf["Aacute"].components)
+ 2
+
+ Note that when adding glyphs to the font via low-level access to the ``glyf``
+ table, the new glyphs must also be added to the ``hmtx``/``vmtx`` table::
+
+ >> font["glyf"]["divisionslash"] = Glyph()
+ >> font["hmtx"]["divisionslash"] = (640, 0)
+
+ """
# this attribute controls the amount of padding applied to glyph data upon compile.
# Glyph lenghts are aligned to multiples of the specified value.
@@ -81,8 +110,11 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
if noname:
log.warning('%s glyphs have no name', noname)
if ttFont.lazy is False: # Be lazy for None and True
- for glyph in self.glyphs.values():
- glyph.expand(self)
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self):
+ for glyph in self.glyphs.values():
+ glyph.expand(self)
def compile(self, ttFont):
if not hasattr(self, "glyphOrder"):
@@ -117,7 +149,7 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
currentLocation += len(glyphData)
locations[len(dataList)] = currentLocation
- data = bytesjoin(dataList)
+ data = b''.join(dataList)
if 'loca' in ttFont:
ttFont['loca'].set(locations)
if 'maxp' in ttFont:
@@ -145,10 +177,10 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
path, ext = os.path.splitext(writer.file.name)
existingGlyphFiles = set()
for glyphName in glyphNames:
- if glyphName not in self:
+ glyph = self.get(glyphName)
+ if glyph is None:
log.warning("glyph '%s' does not exist in glyf table", glyphName)
continue
- glyph = self[glyphName]
if glyph.numberOfContours:
if splitGlyphs:
glyphPath = userNameToFileName(
@@ -215,16 +247,33 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
glyph.compact(self, 0)
def setGlyphOrder(self, glyphOrder):
+ """Sets the glyph order
+
+ Args:
+ glyphOrder ([str]): List of glyph names in order.
+ """
self.glyphOrder = glyphOrder
def getGlyphName(self, glyphID):
+ """Returns the name for the glyph with the given ID.
+
+ Raises a ``KeyError`` if the glyph name is not found in the font.
+ """
return self.glyphOrder[glyphID]
def getGlyphID(self, glyphName):
+ """Returns the ID of the glyph with the given name.
+
+ Raises a ``ValueError`` if the glyph is not found in the font.
+ """
# XXX optimize with reverse dict!!!
return self.glyphOrder.index(glyphName)
def removeHinting(self):
+ """Removes TrueType hints from all glyphs in the glyphset.
+
+ See :py:meth:`Glyph.removeHinting`.
+ """
for glyph in self.glyphs.values():
glyph.removeHinting()
@@ -236,6 +285,12 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
__contains__ = has_key
+ def get(self, glyphName, default=None):
+ glyph = self.glyphs.get(glyphName, default)
+ if glyph is not None:
+ glyph.expand(self)
+ return glyph
+
def __getitem__(self, glyphName):
glyph = self.glyphs[glyphName]
glyph.expand(self)
@@ -254,49 +309,33 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
assert len(self.glyphOrder) == len(self.glyphs)
return len(self.glyphs)
- def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ def _getPhantomPoints(self, glyphName, hMetrics, vMetrics=None):
"""Compute the four "phantom points" for the given glyph from its bounding box
and the horizontal and vertical advance widths and sidebearings stored in the
ttFont's "hmtx" and "vmtx" tables.
- If the ttFont doesn't contain a "vmtx" table, the hhea.ascent is used as the
- vertical origin, and the head.unitsPerEm as the vertical advance.
+ 'hMetrics' should be ttFont['hmtx'].metrics.
- The "defaultVerticalOrigin" (Optional[int]) is needed when the ttFont contains
- neither a "vmtx" nor an "hhea" table, as may happen with 'sparse' masters.
- The value should be the hhea.ascent of the default master.
+ 'vMetrics' should be ttFont['vmtx'].metrics if the font has a "vmtx" table, or None otherwise.
+ If there is no vMetrics passed in, vertical phantom points are set to the zero coordinate.
https://docs.microsoft.com/en-us/typography/opentype/spec/tt_instructing_glyphs#phantoms
"""
glyph = self[glyphName]
- assert glyphName in ttFont["hmtx"].metrics, ttFont["hmtx"].metrics
- horizontalAdvanceWidth, leftSideBearing = ttFont["hmtx"].metrics[glyphName]
if not hasattr(glyph, 'xMin'):
glyph.recalcBounds(self)
+
+ horizontalAdvanceWidth, leftSideBearing = hMetrics[glyphName]
leftSideX = glyph.xMin - leftSideBearing
rightSideX = leftSideX + horizontalAdvanceWidth
- if "vmtx" in ttFont:
- verticalAdvanceWidth, topSideBearing = ttFont["vmtx"].metrics[glyphName]
+
+ if vMetrics:
+ verticalAdvanceWidth, topSideBearing = vMetrics[glyphName]
topSideY = topSideBearing + glyph.yMax
+ bottomSideY = topSideY - verticalAdvanceWidth
else:
- # without vmtx, use ascent as vertical origin and UPEM as vertical advance
- # like HarfBuzz does
- verticalAdvanceWidth = ttFont["head"].unitsPerEm
- if "hhea" in ttFont:
- topSideY = ttFont["hhea"].ascent
- else:
- # sparse masters may not contain an hhea table; use the ascent
- # of the default master as the vertical origin
- if defaultVerticalOrigin is not None:
- topSideY = defaultVerticalOrigin
- else:
- log.warning(
- "font is missing both 'vmtx' and 'hhea' tables, "
- "and no 'defaultVerticalOrigin' was provided; "
- "the vertical phantom points may be incorrect."
- )
- topSideY = verticalAdvanceWidth
- bottomSideY = topSideY - verticalAdvanceWidth
+ bottomSideY = topSideY = 0
+
return [
(leftSideX, 0),
(rightSideX, 0),
@@ -304,7 +343,7 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
(0, bottomSideY),
]
- def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ def _getCoordinatesAndControls(self, glyphName, hMetrics, vMetrics=None):
"""Return glyph coordinates and controls as expected by "gvar" table.
The coordinates includes four "phantom points" for the glyph metrics,
@@ -320,14 +359,14 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
- components: list of base glyph names (str) for each component in
composite glyphs (None for simple glyphs).
- The "ttFont" and "defaultVerticalOrigin" args are used to compute the
- "phantom points" (see "getPhantomPoints" method).
+ The "hMetrics" and vMetrics are used to compute the "phantom points" (see
+ the "_getPhantomPoints" method).
Return None if the requested glyphName is not present.
"""
- if glyphName not in self.glyphs:
+ glyph = self.get(glyphName)
+ if glyph is None:
return None
- glyph = self[glyphName]
if glyph.isComposite():
coords = GlyphCoordinates(
[(getattr(c, 'x', 0), getattr(c, 'y', 0)) for c in glyph.components]
@@ -348,13 +387,11 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
components=None,
)
# Add phantom points for (left, right, top, bottom) positions.
- phantomPoints = self.getPhantomPoints(
- glyphName, ttFont, defaultVerticalOrigin=defaultVerticalOrigin
- )
+ phantomPoints = self._getPhantomPoints(glyphName, hMetrics, vMetrics)
coords.extend(phantomPoints)
return coords, controls
- def setCoordinates(self, glyphName, coord, ttFont):
+ def _setCoordinates(self, glyphName, coord, hMetrics, vMetrics=None):
"""Set coordinates and metrics for the given glyph.
"coord" is an array of GlyphCoordinates which must include the "phantom
@@ -363,9 +400,11 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
Both the horizontal/vertical advances and left/top sidebearings in "hmtx"
and "vmtx" tables (if any) are updated from four phantom points and
the glyph's bounding boxes.
+
+ The "hMetrics" and vMetrics are used to propagate "phantom points"
+ into "hmtx" and "vmtx" tables if desired. (see the "_getPhantomPoints"
+ method).
"""
- # TODO: Create new glyph if not already present
- assert glyphName in self.glyphs
glyph = self[glyphName]
# Handle phantom points for (left, right, top, bottom) positions.
@@ -396,14 +435,61 @@ class table__g_l_y_f(DefaultTable.DefaultTable):
# https://github.com/fonttools/fonttools/pull/1198
horizontalAdvanceWidth = 0
leftSideBearing = otRound(glyph.xMin - leftSideX)
- ttFont["hmtx"].metrics[glyphName] = horizontalAdvanceWidth, leftSideBearing
+ hMetrics[glyphName] = horizontalAdvanceWidth, leftSideBearing
- if "vmtx" in ttFont:
+ if vMetrics is not None:
verticalAdvanceWidth = otRound(topSideY - bottomSideY)
if verticalAdvanceWidth < 0: # unlikely but do the same as horizontal
verticalAdvanceWidth = 0
topSideBearing = otRound(topSideY - glyph.yMax)
- ttFont["vmtx"].metrics[glyphName] = verticalAdvanceWidth, topSideBearing
+ vMetrics[glyphName] = verticalAdvanceWidth, topSideBearing
+
+
+ # Deprecated
+
+ def _synthesizeVMetrics(self, glyphName, ttFont, defaultVerticalOrigin):
+ """This method is wrong and deprecated.
+ For rationale see:
+ https://github.com/fonttools/fonttools/pull/2266/files#r613569473
+ """
+ vMetrics = getattr(ttFont.get('vmtx'), 'metrics', None)
+ if vMetrics is None:
+ verticalAdvanceWidth = ttFont["head"].unitsPerEm
+ topSideY = getattr(ttFont.get('hhea'), 'ascent', None)
+ if topSideY is None:
+ if defaultVerticalOrigin is not None:
+ topSideY = defaultVerticalOrigin
+ else:
+ topSideY = verticalAdvanceWidth
+ glyph = self[glyphName]
+ glyph.recalcBounds(self)
+ topSideBearing = otRound(topSideY - glyph.yMax)
+ vMetrics = {glyphName: (verticalAdvanceWidth, topSideBearing)}
+ return vMetrics
+
+ @deprecateFunction("use '_getPhantomPoints' instead", category=DeprecationWarning)
+ def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ """Old public name for self._getPhantomPoints().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont['hmtx'].metrics
+ vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
+ return self._getPhantomPoints(glyphName, hMetrics, vMetrics)
+
+ @deprecateFunction("use '_getCoordinatesAndControls' instead", category=DeprecationWarning)
+ def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ """Old public name for self._getCoordinatesAndControls().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont['hmtx'].metrics
+ vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
+ return self._getCoordinatesAndControls(glyphName, hMetrics, vMetrics)
+
+ @deprecateFunction("use '_setCoordinates' instead", category=DeprecationWarning)
+ def setCoordinates(self, glyphName, ttFont):
+ """Old public name for self._setCoordinates().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont['hmtx'].metrics
+ vMetrics = getattr(ttFont.get('vmtx'), 'metrics', None)
+ self._setCoordinates(glyphName, hMetrics, vMetrics)
_GlyphControls = namedtuple(
@@ -488,8 +574,7 @@ def flagEncodeCoord(flag, mask, coord, coordBytes):
elif byteCount == -1:
coordBytes.append(-coord)
elif byteCount == 2:
- coordBytes.append((coord >> 8) & 0xFF)
- coordBytes.append(coord & 0xFF)
+ coordBytes.extend(struct.pack('>h', coord))
def flagEncodeCoords(flag, x, y, xBytes, yBytes):
flagEncodeCoord(flag, flagXsame|flagXShort, x, xBytes)
@@ -515,8 +600,29 @@ CompositeMaxpValues = namedtuple('CompositeMaxpValues', ['nPoints', 'nContours',
class Glyph(object):
+ """This class represents an individual TrueType glyph.
+
+ TrueType glyph objects come in two flavours: simple and composite. Simple
+ glyph objects contain contours, represented via the ``.coordinates``,
+ ``.flags``, ``.numberOfContours``, and ``.endPtsOfContours`` attributes;
+ composite glyphs contain components, available through the ``.components``
+ attributes.
+
+ Because the ``.coordinates`` attribute (and other simple glyph attributes mentioned
+ above) is only set on simple glyphs and the ``.components`` attribute is only
+ set on composite glyphs, it is necessary to use the :py:meth:`isComposite`
+ method to test whether a glyph is simple or composite before attempting to
+ access its data.
+
+ For a composite glyph, the components can also be accessed via array-like access::
- def __init__(self, data=""):
+ >> assert(font["glyf"]["Aacute"].isComposite())
+ >> font["glyf"]["Aacute"][0]
+ <fontTools.ttLib.tables._g_l_y_f.GlyphComponent at 0x1027b2ee0>
+
+ """
+
+ def __init__(self, data=b""):
if not data:
# empty char
self.numberOfContours = 0
@@ -557,7 +663,7 @@ class Glyph(object):
else:
return self.data
if self.numberOfContours == 0:
- return ""
+ return b''
if recalcBBoxes:
self.recalcBounds(glyfTable)
data = sstruct.pack(glyphHeaderFormat, self)
@@ -608,7 +714,7 @@ class Glyph(object):
raise ttLib.TTLibError("can't mix composites and contours in glyph")
self.numberOfContours = self.numberOfContours + 1
coordinates = GlyphCoordinates()
- flags = []
+ flags = bytearray()
for element in content:
if not isinstance(element, tuple):
continue
@@ -616,11 +722,10 @@ class Glyph(object):
if name != "pt":
continue # ignore anything but "pt"
coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"])))
- flag = not not safeEval(attrs["on"])
+ flag = bool(safeEval(attrs["on"]))
if "overlap" in attrs and bool(safeEval(attrs["overlap"])):
flag |= flagOverlapSimple
flags.append(flag)
- flags = array.array("B", flags)
if not hasattr(self, "coordinates"):
self.coordinates = coordinates
self.flags = flags
@@ -695,16 +800,14 @@ class Glyph(object):
if sys.byteorder != "big": endPtsOfContours.byteswap()
self.endPtsOfContours = endPtsOfContours.tolist()
- data = data[2*self.numberOfContours:]
-
- instructionLength, = struct.unpack(">h", data[:2])
- data = data[2:]
+ pos = 2*self.numberOfContours
+ instructionLength, = struct.unpack(">h", data[pos:pos+2])
self.program = ttProgram.Program()
- self.program.fromBytecode(data[:instructionLength])
- data = data[instructionLength:]
+ self.program.fromBytecode(data[pos+2:pos+2+instructionLength])
+ pos += 2 + instructionLength
nCoordinates = self.endPtsOfContours[-1] + 1
flags, xCoordinates, yCoordinates = \
- self.decompileCoordinatesRaw(nCoordinates, data)
+ self.decompileCoordinatesRaw(nCoordinates, data, pos)
# fill in repetitions and apply signs
self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates)
@@ -741,24 +844,26 @@ class Glyph(object):
assert yIndex == len(yCoordinates)
coordinates.relativeToAbsolute()
# discard all flags except "keepFlags"
- self.flags = array.array("B", (f & keepFlags for f in flags))
+ for i in range(len(flags)):
+ flags[i] &= keepFlags
+ self.flags = flags
- def decompileCoordinatesRaw(self, nCoordinates, data):
+ def decompileCoordinatesRaw(self, nCoordinates, data, pos=0):
# unpack flags and prepare unpacking of coordinates
- flags = array.array("B", [0] * nCoordinates)
+ flags = bytearray(nCoordinates)
# Warning: deep Python trickery going on. We use the struct module to unpack
# the coordinates. We build a format string based on the flags, so we can
# unpack the coordinates in one struct.unpack() call.
xFormat = ">" # big endian
yFormat = ">" # big endian
- i = j = 0
+ j = 0
while True:
- flag = byteord(data[i])
- i = i + 1
+ flag = data[pos]
+ pos += 1
repeat = 1
if flag & flagRepeat:
- repeat = byteord(data[i]) + 1
- i = i + 1
+ repeat = data[pos] + 1
+ pos += 1
for k in range(repeat):
if flag & flagXShort:
xFormat = xFormat + 'B'
@@ -773,15 +878,14 @@ class Glyph(object):
if j >= nCoordinates:
break
assert j == nCoordinates, "bad glyph flags"
- data = data[i:]
# unpack raw coordinates, krrrrrr-tching!
xDataLen = struct.calcsize(xFormat)
yDataLen = struct.calcsize(yFormat)
- if len(data) - (xDataLen + yDataLen) >= 4:
+ if len(data) - pos - (xDataLen + yDataLen) >= 4:
log.warning(
- "too much glyph data: %d excess bytes", len(data) - (xDataLen + yDataLen))
- xCoordinates = struct.unpack(xFormat, data[:xDataLen])
- yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen])
+ "too much glyph data: %d excess bytes", len(data) - pos - (xDataLen + yDataLen))
+ xCoordinates = struct.unpack(xFormat, data[pos:pos+xDataLen])
+ yCoordinates = struct.unpack(yFormat, data[pos+xDataLen:pos+xDataLen+yDataLen])
return flags, xCoordinates, yCoordinates
def compileComponents(self, glyfTable):
@@ -811,9 +915,7 @@ class Glyph(object):
data.append(instructions)
deltas = self.coordinates.copy()
- if deltas.isFloat():
- # Warn?
- deltas.toInt()
+ deltas.toInt()
deltas.absoluteToRelative()
# TODO(behdad): Add a configuration option for this?
@@ -821,14 +923,14 @@ class Glyph(object):
#deltas = self.compileDeltasOptimal(self.flags, deltas)
data.extend(deltas)
- return bytesjoin(data)
+ return b''.join(data)
def compileDeltasGreedy(self, flags, deltas):
# Implements greedy algorithm for packing coordinate deltas:
# uses shortest representation one coordinate at a time.
- compressedflags = []
- xPoints = []
- yPoints = []
+ compressedFlags = bytearray()
+ compressedXs = bytearray()
+ compressedYs = bytearray()
lastflag = None
repeat = 0
for flag,(x,y) in zip(flags, deltas):
@@ -842,9 +944,9 @@ class Glyph(object):
flag = flag | flagXsame
else:
x = -x
- xPoints.append(bytechr(x))
+ compressedXs.append(x)
else:
- xPoints.append(struct.pack(">h", x))
+ compressedXs.extend(struct.pack('>h', x))
# do y
if y == 0:
flag = flag | flagYsame
@@ -854,24 +956,21 @@ class Glyph(object):
flag = flag | flagYsame
else:
y = -y
- yPoints.append(bytechr(y))
+ compressedYs.append(y)
else:
- yPoints.append(struct.pack(">h", y))
+ compressedYs.extend(struct.pack('>h', y))
# handle repeating flags
if flag == lastflag and repeat != 255:
repeat = repeat + 1
if repeat == 1:
- compressedflags.append(flag)
+ compressedFlags.append(flag)
else:
- compressedflags[-2] = flag | flagRepeat
- compressedflags[-1] = repeat
+ compressedFlags[-2] = flag | flagRepeat
+ compressedFlags[-1] = repeat
else:
repeat = 0
- compressedflags.append(flag)
+ compressedFlags.append(flag)
lastflag = flag
- compressedFlags = array.array("B", compressedflags).tobytes()
- compressedXs = bytesjoin(xPoints)
- compressedYs = bytesjoin(yPoints)
return (compressedFlags, compressedXs, compressedYs)
def compileDeltasOptimal(self, flags, deltas):
@@ -902,9 +1001,9 @@ class Glyph(object):
flags.append(flag)
flags.reverse()
- compressedFlags = array.array("B")
- compressedXs = array.array("B")
- compressedYs = array.array("B")
+ compressedFlags = bytearray()
+ compressedXs = bytearray()
+ compressedYs = bytearray()
coords = iter(deltas)
ff = []
for flag in flags:
@@ -924,72 +1023,22 @@ class Glyph(object):
raise Exception("internal error")
except StopIteration:
pass
- compressedFlags = compressedFlags.tobytes()
- compressedXs = compressedXs.tobytes()
- compressedYs = compressedYs.tobytes()
return (compressedFlags, compressedXs, compressedYs)
def recalcBounds(self, glyfTable):
+ """Recalculates the bounds of the glyph.
+
+ Each glyph object stores its bounding box in the
+ ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
+ recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds
+ must be provided to resolve component bounds.
+ """
coords, endPts, flags = self.getCoordinates(glyfTable)
- if len(coords) > 0:
- if 0:
- # This branch calculates exact glyph outline bounds
- # analytically, handling cases without on-curve
- # extremas, etc. However, the glyf table header
- # simply says that the bounds should be min/max x/y
- # "for coordinate data", so I suppose that means no
- # fancy thing here, just get extremas of all coord
- # points (on and off). As such, this branch is
- # disabled.
-
- # Collect on-curve points
- onCurveCoords = [coords[j] for j in range(len(coords))
- if flags[j] & flagOnCurve]
- # Add implicit on-curve points
- start = 0
- for end in endPts:
- last = end
- for j in range(start, end + 1):
- if not ((flags[j] | flags[last]) & flagOnCurve):
- x = (coords[last][0] + coords[j][0]) / 2
- y = (coords[last][1] + coords[j][1]) / 2
- onCurveCoords.append((x,y))
- last = j
- start = end + 1
- # Add bounds for curves without an explicit extrema
- start = 0
- for end in endPts:
- last = end
- for j in range(start, end + 1):
- if not (flags[j] & flagOnCurve):
- next = j + 1 if j < end else start
- bbox = calcBounds([coords[last], coords[next]])
- if not pointInRect(coords[j], bbox):
- # Ouch!
- log.warning("Outline has curve with implicit extrema.")
- # Ouch! Find analytical curve bounds.
- pthis = coords[j]
- plast = coords[last]
- if not (flags[last] & flagOnCurve):
- plast = ((pthis[0]+plast[0])/2, (pthis[1]+plast[1])/2)
- pnext = coords[next]
- if not (flags[next] & flagOnCurve):
- pnext = ((pthis[0]+pnext[0])/2, (pthis[1]+pnext[1])/2)
- bbox = calcQuadraticBounds(plast, pthis, pnext)
- onCurveCoords.append((bbox[0],bbox[1]))
- onCurveCoords.append((bbox[2],bbox[3]))
- last = j
- start = end + 1
-
- self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(onCurveCoords)
- else:
- self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords)
- else:
- self.xMin, self.yMin, self.xMax, self.yMax = (0, 0, 0, 0)
+ self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords)
def isComposite(self):
- """Can be called on compact or expanded glyph."""
+ """Test whether a glyph has components"""
if hasattr(self, "data") and self.data:
return struct.unpack(">h", self.data[:2])[0] == -1
else:
@@ -1001,12 +1050,27 @@ class Glyph(object):
return self.components[componentIndex]
def getCoordinates(self, glyfTable):
+ """Return the coordinates, end points and flags
+
+ This method returns three values: A :py:class:`GlyphCoordinates` object,
+ a list of the indexes of the final points of each contour (allowing you
+ to split up the coordinates list into contours) and a list of flags.
+
+ On simple glyphs, this method returns information from the glyph's own
+ contours; on composite glyphs, it "flattens" all components recursively
+ to return a list of coordinates representing all the components involved
+ in the glyph.
+
+ To interpret the flags for each point, see the "Simple Glyph Flags"
+ section of the `glyf table specification <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#simple-glyph-description>`_.
+ """
+
if self.numberOfContours > 0:
return self.coordinates, self.endPtsOfContours, self.flags
elif self.isComposite():
# it's a composite
allCoords = GlyphCoordinates()
- allFlags = array.array("B")
+ allFlags = bytearray()
allEndPts = []
for compo in self.components:
g = glyfTable[compo.glyphName]
@@ -1051,9 +1115,14 @@ class Glyph(object):
allFlags.extend(flags)
return allCoords, allEndPts, allFlags
else:
- return GlyphCoordinates(), [], array.array("B")
+ return GlyphCoordinates(), [], bytearray()
def getComponentNames(self, glyfTable):
+ """Returns a list of names of component glyphs used in this glyph
+
+ This method can be used on simple glyphs (in which case it returns an
+ empty list) or composite glyphs.
+ """
if not hasattr(self, "data"):
if self.isComposite():
return [c.glyphName for c in self.components]
@@ -1101,7 +1170,7 @@ class Glyph(object):
if not self.data:
return
numContours = struct.unpack(">h", self.data[:2])[0]
- data = array.array("B", self.data)
+ data = bytearray(self.data)
i = 10
if numContours >= 0:
i += 2 * numContours # endPtsOfContours
@@ -1170,12 +1239,21 @@ class Glyph(object):
# Remove padding
data = data[:i]
- self.data = data.tobytes()
+ self.data = data
def removeHinting(self):
+ """Removes TrueType hinting instructions from the glyph."""
self.trim (remove_hinting=True)
def draw(self, pen, glyfTable, offset=0):
+ """Draws the glyph using the supplied pen object.
+
+ Arguments:
+ pen: An object conforming to the pen protocol.
+ glyfTable: A :py:class:`table__g_l_y_f` object, to resolve components.
+ offset (int): A horizontal offset. If provided, all coordinates are
+ translated by this offset.
+ """
if self.isComposite():
for component in self.components:
@@ -1221,7 +1299,7 @@ class Glyph(object):
pen.closePath()
def drawPoints(self, pen, glyfTable, offset=0):
- """Draw the glyph using the supplied pointPen. Opposed to Glyph.draw(),
+ """Draw the glyph using the supplied pointPen. As opposed to Glyph.draw(),
this will not change the point indices.
"""
@@ -1263,12 +1341,29 @@ class Glyph(object):
return result if result is NotImplemented else not result
class GlyphComponent(object):
+ """Represents a component within a composite glyph.
+
+ The component is represented internally with four attributes: ``glyphName``,
+ ``x``, ``y`` and ``transform``. If there is no "two-by-two" matrix (i.e
+ no scaling, reflection, or rotation; only translation), the ``transform``
+ attribute is not present.
+ """
+ # The above documentation is not *completely* true, but is *true enough* because
+ # the rare firstPt/lastPt attributes are not totally supported and nobody seems to
+ # mind - see below.
def __init__(self):
pass
def getComponentInfo(self):
- """Return the base glyph name and a transform."""
+ """Return information about the component
+
+ This method returns a tuple of two values: the glyph name of the component's
+ base glyph, and a transformation matrix. As opposed to accessing the attributes
+ directly, ``getComponentInfo`` always returns a six-element tuple of the
+ component's transformation matrix, even when the two-by-two ``.transform``
+ matrix is not present.
+ """
# XXX Ignoring self.firstPt & self.lastpt for now: I need to implement
# something equivalent in fontTools.objects.glyph (I'd rather not
# convert it to an absolute offset, since it is valuable information).
@@ -1431,65 +1526,60 @@ class GlyphComponent(object):
return result if result is NotImplemented else not result
class GlyphCoordinates(object):
+ """A list of glyph coordinates.
- def __init__(self, iterable=[], typecode="h"):
- self._a = array.array(typecode)
+ Unlike an ordinary list, this is a numpy-like matrix object which supports
+ matrix addition, scalar multiplication and other operations described below.
+ """
+ def __init__(self, iterable=[]):
+ self._a = array.array('d')
self.extend(iterable)
@property
def array(self):
+ """Returns the underlying array of coordinates"""
return self._a
- def isFloat(self):
- return self._a.typecode == 'd'
-
- def _ensureFloat(self):
- if self.isFloat():
- return
- # The conversion to list() is to work around Jython bug
- self._a = array.array("d", list(self._a))
-
- def _checkFloat(self, p):
- if self.isFloat():
- return p
- if any(v > 0x7FFF or v < -0x8000 for v in p):
- self._ensureFloat()
- return p
- if any(isinstance(v, float) for v in p):
- p = [int(v) if int(v) == v else v for v in p]
- if any(isinstance(v, float) for v in p):
- self._ensureFloat()
- return p
-
@staticmethod
def zeros(count):
- return GlyphCoordinates([(0,0)] * count)
+ """Creates a new ``GlyphCoordinates`` object with all coordinates set to (0,0)"""
+ g = GlyphCoordinates()
+ g._a.frombytes(bytes(count * 2 * g._a.itemsize))
+ return g
def copy(self):
- c = GlyphCoordinates(typecode=self._a.typecode)
+ """Creates a new ``GlyphCoordinates`` object which is a copy of the current one."""
+ c = GlyphCoordinates()
c._a.extend(self._a)
return c
def __len__(self):
+ """Returns the number of coordinates in the array."""
return len(self._a) // 2
def __getitem__(self, k):
+ """Returns a two element tuple (x,y)"""
if isinstance(k, slice):
indices = range(*k.indices(len(self)))
return [self[i] for i in indices]
- return self._a[2*k],self._a[2*k+1]
+ a = self._a
+ x = a[2*k]
+ y = a[2*k+1]
+ return (int(x) if x.is_integer() else x,
+ int(y) if y.is_integer() else y)
def __setitem__(self, k, v):
+ """Sets a point's coordinates to a two element tuple (x,y)"""
if isinstance(k, slice):
indices = range(*k.indices(len(self)))
# XXX This only works if len(v) == len(indices)
for j,i in enumerate(indices):
self[i] = v[j]
return
- v = self._checkFloat(v)
self._a[2*k],self._a[2*k+1] = v
def __delitem__(self, i):
+ """Removes a point from the list"""
i = (2*i) % len(self._a)
del self._a[i]
del self._a[i]
@@ -1498,69 +1588,71 @@ class GlyphCoordinates(object):
return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])'
def append(self, p):
- p = self._checkFloat(p)
self._a.extend(tuple(p))
def extend(self, iterable):
for p in iterable:
- p = self._checkFloat(p)
self._a.extend(p)
def toInt(self, *, round=otRound):
- if not self.isFloat():
- return
- a = array.array("h")
- for n in self._a:
- a.append(round(n))
- self._a = a
+ a = self._a
+ for i in range(len(a)):
+ a[i] = round(a[i])
def relativeToAbsolute(self):
a = self._a
x,y = 0,0
- for i in range(len(a) // 2):
- x = a[2*i ] + x
- y = a[2*i+1] + y
- self[i] = (x, y)
+ for i in range(0, len(a), 2):
+ a[i ] = x = a[i ] + x
+ a[i+1] = y = a[i+1] + y
def absoluteToRelative(self):
a = self._a
x,y = 0,0
- for i in range(len(a) // 2):
- dx = a[2*i ] - x
- dy = a[2*i+1] - y
- x = a[2*i ]
- y = a[2*i+1]
- self[i] = (dx, dy)
+ for i in range(0, len(a), 2):
+ nx = a[i ]
+ ny = a[i+1]
+ a[i] = nx - x
+ a[i+1] = ny - y
+ x = nx
+ y = ny
def translate(self, p):
"""
>>> GlyphCoordinates([(1,2)]).translate((.5,0))
"""
- (x,y) = self._checkFloat(p)
+ x,y = p
+ if x == 0 and y == 0:
+ return
a = self._a
- for i in range(len(a) // 2):
- self[i] = (a[2*i] + x, a[2*i+1] + y)
+ for i in range(0, len(a), 2):
+ a[i] += x
+ a[i+1] += y
def scale(self, p):
"""
>>> GlyphCoordinates([(1,2)]).scale((.5,0))
"""
- (x,y) = self._checkFloat(p)
+ x,y = p
+ if x == 1 and y == 1:
+ return
a = self._a
- for i in range(len(a) // 2):
- self[i] = (a[2*i] * x, a[2*i+1] * y)
+ for i in range(0, len(a), 2):
+ a[i] *= x
+ a[i+1] *= y
def transform(self, t):
"""
>>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5)))
"""
a = self._a
- for i in range(len(a) // 2):
- x = a[2*i ]
- y = a[2*i+1]
+ for i in range(0, len(a), 2):
+ x = a[i ]
+ y = a[i+1]
px = x * t[0][0] + y * t[1][0]
py = x * t[0][1] + y * t[1][1]
- self[i] = (px, py)
+ a[i] = px
+ a[i+1] = py
def __eq__(self, other):
"""
@@ -1645,23 +1737,22 @@ class GlyphCoordinates(object):
>>> g = GlyphCoordinates([(1,2)])
>>> g += (.5,0)
>>> g
- GlyphCoordinates([(1.5, 2.0)])
+ GlyphCoordinates([(1.5, 2)])
>>> g2 = GlyphCoordinates([(3,4)])
>>> g += g2
>>> g
- GlyphCoordinates([(4.5, 6.0)])
+ GlyphCoordinates([(4.5, 6)])
"""
if isinstance(other, tuple):
assert len(other) == 2
self.translate(other)
return self
if isinstance(other, GlyphCoordinates):
- if other.isFloat(): self._ensureFloat()
other = other._a
a = self._a
assert len(a) == len(other)
- for i in range(len(a) // 2):
- self[i] = (a[2*i] + other[2*i], a[2*i+1] + other[2*i+1])
+ for i in range(len(a)):
+ a[i] += other[i]
return self
return NotImplemented
@@ -1670,23 +1761,22 @@ class GlyphCoordinates(object):
>>> g = GlyphCoordinates([(1,2)])
>>> g -= (.5,0)
>>> g
- GlyphCoordinates([(0.5, 2.0)])
+ GlyphCoordinates([(0.5, 2)])
>>> g2 = GlyphCoordinates([(3,4)])
>>> g -= g2
>>> g
- GlyphCoordinates([(-2.5, -2.0)])
+ GlyphCoordinates([(-2.5, -2)])
"""
if isinstance(other, tuple):
assert len(other) == 2
self.translate((-other[0],-other[1]))
return self
if isinstance(other, GlyphCoordinates):
- if other.isFloat(): self._ensureFloat()
other = other._a
a = self._a
assert len(a) == len(other)
- for i in range(len(a) // 2):
- self[i] = (a[2*i] - other[2*i], a[2*i+1] - other[2*i+1])
+ for i in range(len(a)):
+ a[i] -= other[i]
return self
return NotImplemented
@@ -1696,20 +1786,23 @@ class GlyphCoordinates(object):
>>> g *= (2,.5)
>>> g *= 2
>>> g
- GlyphCoordinates([(4.0, 2.0)])
+ GlyphCoordinates([(4, 2)])
>>> g = GlyphCoordinates([(1,2)])
>>> g *= 2
>>> g
GlyphCoordinates([(2, 4)])
"""
- if isinstance(other, Number):
- other = (other, other)
if isinstance(other, tuple):
- if other == (1,1):
- return self
assert len(other) == 2
self.scale(other)
return self
+ if isinstance(other, Number):
+ if other == 1:
+ return self
+ a = self._a
+ for i in range(len(a)):
+ a[i] *= other
+ return self
return NotImplemented
def __itruediv__(self, other):
@@ -1718,7 +1811,7 @@ class GlyphCoordinates(object):
>>> g /= (.5,1.5)
>>> g /= 2
>>> g
- GlyphCoordinates([(1.0, 1.0)])
+ GlyphCoordinates([(1, 1)])
"""
if isinstance(other, Number):
other = (other, other)
@@ -1750,20 +1843,6 @@ class GlyphCoordinates(object):
__nonzero__ = __bool__
-def reprflag(flag):
- bin = ""
- if isinstance(flag, str):
- flag = byteord(flag)
- while flag:
- if flag & 0x01:
- bin = "1" + bin
- else:
- bin = "0" + bin
- flag = flag >> 1
- bin = (14 - len(bin)) * "0" + bin
- return bin
-
-
if __name__ == "__main__":
import doctest, sys
sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/tables/_g_v_a_r.py b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
index 8c9b530e..bc283cfe 100644
--- a/Lib/fontTools/ttLib/tables/_g_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
@@ -1,4 +1,3 @@
-from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
@@ -76,12 +75,13 @@ class table__g_v_a_r(DefaultTable.DefaultTable):
result = [compiledHeader, compiledOffsets]
result.extend(sharedTuples)
result.extend(compiledGlyphs)
- return bytesjoin(result)
+ return b''.join(result)
def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
result = []
+ glyf = ttFont['glyf']
for glyphName in ttFont.getGlyphOrder():
- glyph = ttFont["glyf"][glyphName]
+ glyph = glyf[glyphName]
pointCount = self.getNumPoints_(glyph)
variations = self.variations.get(glyphName, [])
result.append(compileGlyph_(variations, pointCount,
@@ -99,9 +99,10 @@ class table__g_v_a_r(DefaultTable.DefaultTable):
axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples)
self.variations = {}
offsetToData = self.offsetToGlyphVariationData
+ glyf = ttFont['glyf']
for i in range(self.glyphCount):
glyphName = glyphs[i]
- glyph = ttFont["glyf"][glyphName]
+ glyph = glyf[glyphName]
numPointsInGlyph = self.getNumPoints_(glyph)
gvarData = data[offsetToData + offsets[i] : offsetToData + offsets[i + 1]]
try:
@@ -214,12 +215,14 @@ def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices):
variations, pointCount, axisTags, sharedCoordIndices)
if tupleVariationCount == 0:
return b""
- result = (
- struct.pack(">HH", tupleVariationCount, 4 + len(tuples)) + tuples + data
- )
- if len(result) % 2 != 0:
- result = result + b"\0" # padding
- return result
+ result = [
+ struct.pack(">HH", tupleVariationCount, 4 + len(tuples)),
+ tuples,
+ data
+ ]
+ if (len(tuples) + len(data)) % 2 != 0:
+ result.append(b"\0") # padding
+ return b''.join(result)
def decompileGlyph_(pointCount, sharedTuples, axisTags, data):
diff --git a/Lib/fontTools/ttLib/tables/_h_d_m_x.py b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
index 954d1bc1..9f860d2a 100644
--- a/Lib/fontTools/ttLib/tables/_h_d_m_x.py
+++ b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
@@ -1,5 +1,5 @@
-from fontTools.misc.py23 import bytechr, byteord, strjoin
from fontTools.misc import sstruct
+from fontTools.misc.textTools import bytechr, byteord, strjoin
from . import DefaultTable
import array
from collections.abc import Mapping
diff --git a/Lib/fontTools/ttLib/tables/_l_t_a_g.py b/Lib/fontTools/ttLib/tables/_l_t_a_g.py
index caec72a3..ce3c6b97 100644
--- a/Lib/fontTools/ttLib/tables/_l_t_a_g.py
+++ b/Lib/fontTools/ttLib/tables/_l_t_a_g.py
@@ -1,5 +1,4 @@
-from fontTools.misc.py23 import bytesjoin, tobytes
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytesjoin, tobytes, safeEval
from . import DefaultTable
import struct
diff --git a/Lib/fontTools/ttLib/tables/_m_e_t_a.py b/Lib/fontTools/ttLib/tables/_m_e_t_a.py
index 1a125f82..3faf0a56 100644
--- a/Lib/fontTools/ttLib/tables/_m_e_t_a.py
+++ b/Lib/fontTools/ttLib/tables/_m_e_t_a.py
@@ -1,6 +1,5 @@
-from fontTools.misc.py23 import bytesjoin, strjoin
from fontTools.misc import sstruct
-from fontTools.misc.textTools import readHex
+from fontTools.misc.textTools import bytesjoin, strjoin, readHex
from fontTools.ttLib import TTLibError
from . import DefaultTable
diff --git a/Lib/fontTools/ttLib/tables/_n_a_m_e.py b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
index 206469de..9558addb 100644
--- a/Lib/fontTools/ttLib/tables/_n_a_m_e.py
+++ b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
-from fontTools.misc.py23 import bytechr, byteord, bytesjoin, strjoin, tobytes, tostr
from fontTools.misc import sstruct
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin, tobytes, tostr, safeEval
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import newTable
from . import DefaultTable
@@ -121,6 +120,44 @@ class table__n_a_m_e(DefaultTable.DefaultTable):
else:
return None
+ def getFirstDebugName(self, nameIDs):
+ for nameID in nameIDs:
+ name = self.getDebugName(nameID)
+ if name is not None:
+ return name
+ return None
+
+ def getBestFamilyName(self):
+ # 21 = WWS Family Name
+ # 16 = Typographic Family Name
+ # 1 = Family Name
+ return self.getFirstDebugName((21, 16, 1))
+
+ def getBestSubFamilyName(self):
+ # 22 = WWS SubFamily Name
+ # 17 = Typographic SubFamily Name
+ # 2 = SubFamily Name
+ return self.getFirstDebugName((22, 17, 2))
+
+ def getBestFullName(self):
+ # 4 = Full Name
+ # 6 = PostScript Name
+ for nameIDs in ((21, 22), (16, 17), (1, 2), (4, ), (6, )):
+ if len(nameIDs) == 2:
+ name_fam = self.getDebugName(nameIDs[0])
+ name_subfam = self.getDebugName(nameIDs[1])
+ if None in [name_fam, name_subfam]:
+ continue # if any is None, skip
+ name = f"{name_fam} {name_subfam}"
+ if name_subfam.lower() == 'regular':
+ name = f"{name_fam}"
+ return name
+ else:
+ name = self.getDebugName(nameIDs[0])
+ if name is not None:
+ return name
+ return None
+
def setName(self, string, nameID, platformID, platEncID, langID):
""" Set the 'string' for the name record identified by 'nameID', 'platformID',
'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it
diff --git a/Lib/fontTools/ttLib/tables/_p_o_s_t.py b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
index e26e81f8..c54b87f0 100644
--- a/Lib/fontTools/ttLib/tables/_p_o_s_t.py
+++ b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
@@ -1,13 +1,14 @@
-from fontTools.misc.py23 import bytechr, byteord, tobytes, tostr
from fontTools import ttLib
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.misc import sstruct
-from fontTools.misc.textTools import safeEval, readHex
+from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
from . import DefaultTable
import sys
import struct
import array
+import logging
+log = logging.getLogger(__name__)
postFormat = """
>
@@ -85,7 +86,8 @@ class table__p_o_s_t(DefaultTable.DefaultTable):
indices.frombytes(data[:2*numGlyphs])
if sys.byteorder != "big": indices.byteswap()
data = data[2*numGlyphs:]
- self.extraNames = extraNames = unpackPStrings(data)
+ maxIndex = max(indices)
+ self.extraNames = extraNames = unpackPStrings(data, maxIndex-257)
self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs)
for glyphID in range(numGlyphs):
index = indices[glyphID]
@@ -252,14 +254,34 @@ class table__p_o_s_t(DefaultTable.DefaultTable):
self.data = readHex(content)
-def unpackPStrings(data):
+def unpackPStrings(data, n):
+ # extract n Pascal strings from data.
+ # if there is not enough data, use ""
+
strings = []
index = 0
dataLen = len(data)
- while index < dataLen:
- length = byteord(data[index])
- strings.append(tostr(data[index+1:index+1+length], encoding="latin1"))
- index = index + 1 + length
+
+ for _ in range(n):
+ if dataLen <= index:
+ length = 0
+ else:
+ length = byteord(data[index])
+ index += 1
+
+ if dataLen <= index + length - 1:
+ name = ""
+ else:
+ name = tostr(data[index:index+length], encoding="latin1")
+ strings.append (name)
+ index += length
+
+ if index < dataLen:
+ log.warning("%d extra bytes in post.stringData array", dataLen - index)
+
+ elif dataLen < index:
+ log.warning("not enough data in post.stringData array")
+
return strings
diff --git a/Lib/fontTools/ttLib/tables/_t_r_a_k.py b/Lib/fontTools/ttLib/tables/_t_r_a_k.py
index 7f3227dc..3052496f 100644
--- a/Lib/fontTools/ttLib/tables/_t_r_a_k.py
+++ b/Lib/fontTools/ttLib/tables/_t_r_a_k.py
@@ -1,4 +1,3 @@
-from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
@@ -6,7 +5,7 @@ from fontTools.misc.fixedTools import (
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
-from fontTools.misc.textTools import safeEval
+from fontTools.misc.textTools import bytesjoin, safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
import struct
diff --git a/Lib/fontTools/ttLib/tables/asciiTable.py b/Lib/fontTools/ttLib/tables/asciiTable.py
index 7b036c8e..a97d92df 100644
--- a/Lib/fontTools/ttLib/tables/asciiTable.py
+++ b/Lib/fontTools/ttLib/tables/asciiTable.py
@@ -1,4 +1,4 @@
-from fontTools.misc.py23 import strjoin, tobytes, tostr
+from fontTools.misc.textTools import strjoin, tobytes, tostr
from . import DefaultTable
diff --git a/Lib/fontTools/ttLib/tables/otBase.py b/Lib/fontTools/ttLib/tables/otBase.py
index 3c07f9e1..bc2c9fba 100644
--- a/Lib/fontTools/ttLib/tables/otBase.py
+++ b/Lib/fontTools/ttLib/tables/otBase.py
@@ -1,9 +1,10 @@
-from fontTools.misc.py23 import Tag, bytesjoin
+from fontTools.misc.textTools import Tag, bytesjoin
from .DefaultTable import DefaultTable
import sys
import array
import struct
import logging
+from typing import Iterator, NamedTuple, Optional
log = logging.getLogger(__name__)
@@ -34,6 +35,7 @@ class BaseTTXConverter(DefaultTable):
"""
def decompile(self, data, font):
+ """Create an object from the binary data. Called automatically on access."""
from . import otTables
reader = OTTableReader(data, tableTag=self.tableTag)
tableClass = getattr(otTables, self.tableTag)
@@ -41,26 +43,28 @@ class BaseTTXConverter(DefaultTable):
self.table.decompile(reader, font)
def compile(self, font):
- """ Create a top-level OTTableWriter for the GPOS/GSUB table.
- Call the compile method for the the table
- for each 'converter' record in the table converter list
- call converter's write method for each item in the value.
- - For simple items, the write method adds a string to the
- writer's self.items list.
- - For Struct/Table/Subtable items, it add first adds new writer to the
- to the writer's self.items, then calls the item's compile method.
- This creates a tree of writers, rooted at the GUSB/GPOS writer, with
- each writer representing a table, and the writer.items list containing
- the child data strings and writers.
- call the getAllData method
- call _doneWriting, which removes duplicates
- call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables
- Traverse the flat list of tables, calling getDataLength on each to update their position
- Traverse the flat list of tables again, calling getData each get the data in the table, now that
- pos's and offset are known.
-
- If a lookup subtable overflows an offset, we have to start all over.
- """
+ """Compiles the table into binary. Called automatically on save."""
+
+ # General outline:
+ # Create a top-level OTTableWriter for the GPOS/GSUB table.
+		# Call the compile method for the table
+ # for each 'converter' record in the table converter list
+ # call converter's write method for each item in the value.
+ # - For simple items, the write method adds a string to the
+ # writer's self.items list.
+		#   - For Struct/Table/Subtable items, it first adds a new writer to
+		#     the writer's self.items, then calls the item's compile method.
+		# This creates a tree of writers, rooted at the GSUB/GPOS writer, with
+ # each writer representing a table, and the writer.items list containing
+ # the child data strings and writers.
+ # call the getAllData method
+ # call _doneWriting, which removes duplicates
+		# call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
+ # Traverse the flat list of tables, calling getDataLength on each to update their position
+		# Traverse the flat list of tables again, calling getData on each to get the data in the table, now that
+		# positions and offsets are known.
+
+ # If a lookup subtable overflows an offset, we have to start all over.
overflowRecord = None
while True:
@@ -105,6 +109,13 @@ class BaseTTXConverter(DefaultTable):
self.table.fromXML(name, attrs, content, font)
self.table.populateDefaults()
+ def ensureDecompiled(self):
+ self.table.ensureDecompiled(recurse=True)
+
+
+# https://github.com/fonttools/fonttools/pull/2285#issuecomment-834652928
+assert len(struct.pack('i', 0)) == 4
+assert array.array('i').itemsize == 4, "Oops, file a bug against fonttools."
class OTTableReader(object):
@@ -140,32 +151,43 @@ class OTTableReader(object):
value, = struct.unpack(f">{typecode}", self.data[pos:newpos])
self.pos = newpos
return value
-
- def readUShort(self):
- return self.readValue("H", staticSize=2)
-
def readArray(self, typecode, staticSize, count):
pos = self.pos
newpos = pos + count * staticSize
value = array.array(typecode, self.data[pos:newpos])
if sys.byteorder != "big": value.byteswap()
self.pos = newpos
- return value
-
- def readUShortArray(self, count):
- return self.readArray("H", staticSize=2, count=count)
+ return value.tolist()
def readInt8(self):
return self.readValue("b", staticSize=1)
+ def readInt8Array(self, count):
+ return self.readArray("b", staticSize=1, count=count)
def readShort(self):
return self.readValue("h", staticSize=2)
+ def readShortArray(self, count):
+ return self.readArray("h", staticSize=2, count=count)
def readLong(self):
- return self.readValue("l", staticSize=4)
+ return self.readValue("i", staticSize=4)
+ def readLongArray(self, count):
+ return self.readArray("i", staticSize=4, count=count)
def readUInt8(self):
return self.readValue("B", staticSize=1)
+ def readUInt8Array(self, count):
+ return self.readArray("B", staticSize=1, count=count)
+
+ def readUShort(self):
+ return self.readValue("H", staticSize=2)
+ def readUShortArray(self, count):
+ return self.readArray("H", staticSize=2, count=count)
+
+ def readULong(self):
+ return self.readValue("I", staticSize=4)
+ def readULongArray(self, count):
+ return self.readArray("I", staticSize=4, count=count)
def readUInt24(self):
pos = self.pos
@@ -173,9 +195,8 @@ class OTTableReader(object):
value, = struct.unpack(">l", b'\0'+self.data[pos:newpos])
self.pos = newpos
return value
-
- def readULong(self):
- return self.readValue("L", staticSize=4)
+ def readUInt24Array(self, count):
+ return [self.readUInt24() for _ in range(count)]
def readTag(self):
pos = self.pos
@@ -316,6 +337,12 @@ class OTTableWriter(object):
items[i] = item.getCountData()
elif hasattr(item, "getData"):
item._doneWriting(internedTables)
+ # At this point, all subwriters are hashable based on their items.
+ # (See hash and comparison magic methods above.) So the ``setdefault``
+ # call here will return the first writer object we've seen with
+ # equal content, or store it in the dictionary if it's not been
+ # seen yet. We therefore replace the subwriter object with an equivalent
+ # object, which deduplicates the tree.
if not dontShare:
items[i] = item = internedTables.setdefault(item, item)
self.items = tuple(items)
@@ -344,13 +371,13 @@ class OTTableWriter(object):
tables, extTables, done = extTables, None, {}
# add Coverage table if it is sorted last.
- sortCoverageLast = 0
+ sortCoverageLast = False
if hasattr(self, "sortCoverageLast"):
# Find coverage table
for i in range(numItems):
item = self.items[i]
- if hasattr(item, "name") and (item.name == "Coverage"):
- sortCoverageLast = 1
+ if getattr(item, 'name', None) == "Coverage":
+ sortCoverageLast = True
break
if id(item) not in done:
item._gatherTables(tables, extTables, done)
@@ -363,7 +390,7 @@ class OTTableWriter(object):
if not hasattr(item, "getData"):
continue
- if sortCoverageLast and (i==1) and item.name == 'Coverage':
+ if sortCoverageLast and (i==1) and getattr(item, 'name', None) == 'Coverage':
# we've already 'gathered' it above
continue
@@ -419,33 +446,52 @@ class OTTableWriter(object):
def writeValue(self, typecode, value):
self.items.append(struct.pack(f">{typecode}", value))
+ def writeArray(self, typecode, values):
+ a = array.array(typecode, values)
+ if sys.byteorder != "big": a.byteswap()
+ self.items.append(a.tobytes())
- def writeUShort(self, value):
- assert 0 <= value < 0x10000, value
- self.items.append(struct.pack(">H", value))
+ def writeInt8(self, value):
+ assert -128 <= value < 128, value
+ self.items.append(struct.pack(">b", value))
+ def writeInt8Array(self, values):
+ self.writeArray('b', values)
def writeShort(self, value):
assert -32768 <= value < 32768, value
self.items.append(struct.pack(">h", value))
+ def writeShortArray(self, values):
+ self.writeArray('h', values)
+
+ def writeLong(self, value):
+ self.items.append(struct.pack(">i", value))
+ def writeLongArray(self, values):
+ self.writeArray('i', values)
def writeUInt8(self, value):
assert 0 <= value < 256, value
self.items.append(struct.pack(">B", value))
+ def writeUInt8Array(self, values):
+ self.writeArray('B', values)
- def writeInt8(self, value):
- assert -128 <= value < 128, value
- self.items.append(struct.pack(">b", value))
+ def writeUShort(self, value):
+ assert 0 <= value < 0x10000, value
+ self.items.append(struct.pack(">H", value))
+ def writeUShortArray(self, values):
+ self.writeArray('H', values)
+
+ def writeULong(self, value):
+ self.items.append(struct.pack(">I", value))
+ def writeULongArray(self, values):
+ self.writeArray('I', values)
def writeUInt24(self, value):
assert 0 <= value < 0x1000000, value
b = struct.pack(">L", value)
self.items.append(b[1:])
-
- def writeLong(self, value):
- self.items.append(struct.pack(">l", value))
-
- def writeULong(self, value):
- self.items.append(struct.pack(">L", value))
+ def writeUInt24Array(self, values):
+ for value in values:
+ self.writeUInt24(value)
def writeTag(self, tag):
tag = Tag(tag).tobytes()
@@ -532,11 +578,11 @@ def packUShort(value):
def packULong(value):
assert 0 <= value < 0x100000000, value
- return struct.pack(">L", value)
+ return struct.pack(">I", value)
def packUInt24(value):
assert 0 <= value < 0x1000000, value
- return struct.pack(">L", value)[1:]
+ return struct.pack(">I", value)[1:]
class BaseTable(object):
@@ -554,13 +600,16 @@ class BaseTable(object):
raise AttributeError(attr)
- def ensureDecompiled(self):
+ def ensureDecompiled(self, recurse=False):
reader = self.__dict__.get("reader")
if reader:
del self.reader
font = self.font
del self.font
self.decompile(reader, font)
+ if recurse:
+ for subtable in self.iterSubTables():
+ subtable.value.ensureDecompiled(recurse)
@classmethod
def getRecordSize(cls, reader):
@@ -571,7 +620,7 @@ class BaseTable(object):
countValue = 1
if conv.repeat:
if conv.repeat in reader:
- countValue = reader[conv.repeat]
+ countValue = reader[conv.repeat] + conv.aux
else:
return NotImplemented
totalSize += size * countValue
@@ -698,14 +747,11 @@ class BaseTable(object):
else:
# conv.repeat is a propagated count
writer[conv.repeat].setValue(countValue)
- values = value
- for i, value in enumerate(values):
- try:
- conv.write(writer, font, table, value, i)
- except Exception as e:
- name = value.__class__.__name__ if value is not None else conv.name
- e.args = e.args + (name+'['+str(i)+']',)
- raise
+ try:
+ conv.writeArray(writer, font, table, value)
+ except Exception as e:
+ e.args = e.args + (conv.name+'[]',)
+ raise
elif conv.isCount:
# Special-case Count values.
# Assumption: a Count field will *always* precede
@@ -812,6 +858,37 @@ class BaseTable(object):
return self.__dict__ == other.__dict__
+ class SubTableEntry(NamedTuple):
+ """See BaseTable.iterSubTables()"""
+ name: str
+ value: "BaseTable"
+ index: Optional[int] = None # index into given array, None for single values
+
+ def iterSubTables(self) -> Iterator[SubTableEntry]:
+ """Yield (name, value, index) namedtuples for all subtables of current table.
+
+ A sub-table is an instance of BaseTable (or subclass thereof) that is a child
+ of self, the current parent table.
+ The tuples also contain the attribute name (str) of the of parent table to get
+ a subtable, and optionally, for lists of subtables (i.e. attributes associated
+ with a converter that has a 'repeat'), an index into the list containing the
+ given subtable value.
+ This method can be useful to traverse trees of otTables.
+ """
+ for conv in self.getConverters():
+ name = conv.name
+ value = getattr(self, name, None)
+ if value is None:
+ continue
+ if isinstance(value, BaseTable):
+ yield self.SubTableEntry(name, value)
+ elif isinstance(value, list):
+ yield from (
+ self.SubTableEntry(name, v, index=i)
+ for i, v in enumerate(value)
+ if isinstance(v, BaseTable)
+ )
+
class FormatSwitchingBaseTable(BaseTable):
@@ -823,6 +900,15 @@ class FormatSwitchingBaseTable(BaseTable):
return NotImplemented
def getConverters(self):
+ try:
+ fmt = self.Format
+ except AttributeError:
+ # some FormatSwitchingBaseTables (e.g. Coverage) no longer have 'Format'
+ # attribute after fully decompiled, only gain one in preWrite before being
+ # recompiled. In the decompiled state, these hand-coded classes defined in
+ # otTables.py lose their format-specific nature and gain more high-level
+ # attributes that are not tied to converters.
+ return []
return self.converters.get(self.Format, [])
def getConverterByName(self, name):
@@ -970,6 +1056,13 @@ class ValueRecord(object):
format = format | valueRecordFormatDict[name][0]
return format
+ def getEffectiveFormat(self):
+ format = 0
+ for name,value in self.__dict__.items():
+ if value:
+ format = format | valueRecordFormatDict[name][0]
+ return format
+
def toXML(self, xmlWriter, font, valueName, attrs=None):
if attrs is None:
simpleItems = []
diff --git a/Lib/fontTools/ttLib/tables/otConverters.py b/Lib/fontTools/ttLib/tables/otConverters.py
index 4af38acd..44fcd0ab 100644
--- a/Lib/fontTools/ttLib/tables/otConverters.py
+++ b/Lib/fontTools/ttLib/tables/otConverters.py
@@ -1,4 +1,3 @@
-from fontTools.misc.py23 import bytesjoin, tobytes, tostr
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
@@ -7,14 +6,15 @@ from fontTools.misc.fixedTools import (
ensureVersionIsLong as fi2ve,
versionToFixed as ve2fi,
)
-from fontTools.misc.textTools import pad, safeEval
+from fontTools.misc.roundTools import nearestMultipleShortestRepr, otRound
+from fontTools.misc.textTools import bytesjoin, tobytes, tostr, pad, safeEval
from fontTools.ttLib import getSearchRange
from .otBase import (CountReference, FormatSwitchingBaseTable,
OTTableReader, OTTableWriter, ValueRecordFactory)
from .otTables import (lookupTypes, AATStateTable, AATState, AATAction,
ContextualMorphAction, LigatureMorphAction,
- InsertionMorphAction, MorxSubtable, VariableFloat,
- VariableInt, ExtendMode as _ExtendMode,
+ InsertionMorphAction, MorxSubtable,
+ ExtendMode as _ExtendMode,
CompositeMode as _CompositeMode)
from itertools import zip_longest
from functools import partial
@@ -192,8 +192,12 @@ class BaseConverter(object):
raise NotImplementedError(self)
def writeArray(self, writer, font, tableDict, values):
- for i, value in enumerate(values):
- self.write(writer, font, tableDict, value, i)
+ try:
+ for i, value in enumerate(values):
+ self.write(writer, font, tableDict, value, i)
+ except Exception as e:
+ e.args = e.args + (i,)
+ raise
def write(self, writer, font, tableDict, value, repeatIndex=None):
"""Write a value to the writer."""
@@ -221,6 +225,18 @@ class SimpleValue(BaseConverter):
def xmlRead(self, attrs, content, font):
return self.fromString(attrs["value"])
+class OptionalValue(SimpleValue):
+ DEFAULT = None
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value != self.DEFAULT:
+ attrs.append(("value", self.toString(value)))
+ xmlWriter.simpletag(name, attrs)
+ xmlWriter.newline()
+ def xmlRead(self, attrs, content, font):
+ if "value" in attrs:
+ return self.fromString(attrs["value"])
+ return self.DEFAULT
+
class IntValue(SimpleValue):
@staticmethod
def fromString(value):
@@ -230,48 +246,75 @@ class Long(IntValue):
staticSize = 4
def read(self, reader, font, tableDict):
return reader.readLong()
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readLongArray(count)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeLong(value)
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeLongArray(values)
class ULong(IntValue):
staticSize = 4
def read(self, reader, font, tableDict):
return reader.readULong()
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readULongArray(count)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeULong(value)
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeULongArray(values)
class Flags32(ULong):
@staticmethod
def toString(value):
return "0x%08X" % value
+class VarIndex(OptionalValue, ULong):
+ DEFAULT = 0xFFFFFFFF
+
class Short(IntValue):
staticSize = 2
def read(self, reader, font, tableDict):
return reader.readShort()
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readShortArray(count)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeShort(value)
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeShortArray(values)
class UShort(IntValue):
staticSize = 2
def read(self, reader, font, tableDict):
return reader.readUShort()
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readUShortArray(count)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeUShort(value)
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeUShortArray(values)
class Int8(IntValue):
staticSize = 1
def read(self, reader, font, tableDict):
return reader.readInt8()
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readInt8Array(count)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeInt8(value)
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeInt8Array(values)
class UInt8(IntValue):
staticSize = 1
def read(self, reader, font, tableDict):
return reader.readUInt8()
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readUInt8Array(count)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeUInt8(value)
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeUInt8Array(values)
class UInt24(IntValue):
staticSize = 3
@@ -304,16 +347,11 @@ class GlyphID(SimpleValue):
staticSize = 2
typecode = "H"
def readArray(self, reader, font, tableDict, count):
- glyphOrder = font.getGlyphOrder()
- gids = reader.readArray(self.typecode, self.staticSize, count)
- try:
- l = [glyphOrder[gid] for gid in gids]
- except IndexError:
- # Slower, but will not throw an IndexError on an invalid glyph id.
- l = [font.getGlyphName(gid) for gid in gids]
- return l
+ return font.getGlyphNameMany(reader.readArray(self.typecode, self.staticSize, count))
def read(self, reader, font, tableDict):
return font.getGlyphName(reader.readValue(self.typecode, self.staticSize))
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeArray(self.typecode, font.getGlyphIDMany(values))
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeValue(self.typecode, font.getGlyphID(value))
@@ -390,6 +428,22 @@ class F2Dot14(FloatValue):
def toString(value):
return fl2str(value, 14)
+class Angle(F2Dot14):
+ # angles are specified in degrees, and encoded as F2Dot14 fractions of half
+ # circle: e.g. 1.0 => 180, -0.5 => -90, -2.0 => -360, etc.
+ factor = 1.0/(1<<14) * 180 # 0.010986328125
+ def read(self, reader, font, tableDict):
+ return super().read(reader, font, tableDict) * 180
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ super().write(writer, font, tableDict, value / 180, repeatIndex=repeatIndex)
+ @classmethod
+ def fromString(cls, value):
+ # quantize to nearest multiples of minimum fixed-precision angle
+ return otRound(float(value) / cls.factor) * cls.factor
+ @classmethod
+ def toString(cls, value):
+ return nearestMultipleShortestRepr(value, cls.factor)
+
class Version(SimpleValue):
staticSize = 4
def read(self, reader, font, tableDict):
@@ -1155,8 +1209,7 @@ class STXHeader(BaseConverter):
def _readLigatures(self, reader, font):
limit = len(reader.data)
numLigatureGlyphs = (limit - reader.pos) // 2
- return [font.getGlyphName(g)
- for g in reader.readUShortArray(numLigatureGlyphs)]
+ return font.getGlyphNameMany(reader.readUShortArray(numLigatureGlyphs))
def _countPerGlyphLookups(self, table):
# Somewhat annoyingly, the morx table does not encode
@@ -1551,20 +1604,15 @@ class VarIdxMapValue(BaseConverter):
outerShift = 16 - innerBits
entrySize = 1 + ((fmt & 0x0030) >> 4)
- read = {
- 1: reader.readUInt8,
- 2: reader.readUShort,
- 3: reader.readUInt24,
- 4: reader.readULong,
+ readArray = {
+ 1: reader.readUInt8Array,
+ 2: reader.readUShortArray,
+ 3: reader.readUInt24Array,
+ 4: reader.readULongArray,
}[entrySize]
- mapping = []
- for i in range(nItems):
- raw = read()
- idx = ((raw & outerMask) << outerShift) | (raw & innerMask)
- mapping.append(idx)
-
- return mapping
+ return [(((raw & outerMask) << outerShift) | (raw & innerMask))
+ for raw in readArray(nItems)]
def write(self, writer, font, tableDict, value, repeatIndex=None):
fmt = tableDict['EntryFormat']
@@ -1576,16 +1624,15 @@ class VarIdxMapValue(BaseConverter):
outerShift = 16 - innerBits
entrySize = 1 + ((fmt & 0x0030) >> 4)
- write = {
- 1: writer.writeUInt8,
- 2: writer.writeUShort,
- 3: writer.writeUInt24,
- 4: writer.writeULong,
+ writeArray = {
+ 1: writer.writeUInt8Array,
+ 2: writer.writeUShortArray,
+ 3: writer.writeUInt24Array,
+ 4: writer.writeULongArray,
}[entrySize]
- for idx in mapping:
- raw = ((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask)
- write(raw)
+ writeArray([(((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask))
+ for idx in mapping])
class VarDataValue(BaseConverter):
@@ -1594,27 +1641,43 @@ class VarDataValue(BaseConverter):
values = []
regionCount = tableDict["VarRegionCount"]
- shortCount = tableDict["NumShorts"]
+ wordCount = tableDict["NumShorts"]
- for i in range(min(regionCount, shortCount)):
- values.append(reader.readShort())
- for i in range(min(regionCount, shortCount), regionCount):
- values.append(reader.readInt8())
- for i in range(regionCount, shortCount):
- reader.readInt8()
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = bool(wordCount & 0x8000)
+ wordCount = wordCount & 0x7FFF
+
+ if longWords:
+ readBigArray, readSmallArray = reader.readLongArray, reader.readShortArray
+ else:
+ readBigArray, readSmallArray = reader.readShortArray, reader.readInt8Array
+
+ n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
+ values.extend(readBigArray(n1))
+ values.extend(readSmallArray(n2 - n1))
+ if n2 > regionCount: # Padding
+ del values[regionCount:]
return values
- def write(self, writer, font, tableDict, value, repeatIndex=None):
+ def write(self, writer, font, tableDict, values, repeatIndex=None):
regionCount = tableDict["VarRegionCount"]
- shortCount = tableDict["NumShorts"]
+ wordCount = tableDict["NumShorts"]
- for i in range(min(regionCount, shortCount)):
- writer.writeShort(value[i])
- for i in range(min(regionCount, shortCount), regionCount):
- writer.writeInt8(value[i])
- for i in range(regionCount, shortCount):
- writer.writeInt8(0)
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = bool(wordCount & 0x8000)
+ wordCount = wordCount & 0x7FFF
+
+ (writeBigArray, writeSmallArray) = {
+ False: (writer.writeShortArray, writer.writeInt8Array),
+ True: (writer.writeLongArray, writer.writeShortArray),
+ }[longWords]
+
+ n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
+ writeBigArray(values[:n1])
+ writeSmallArray(values[n1:regionCount])
+ if n2 > regionCount: # Padding
+ writer.writeSmallArray([0] * (n2 - regionCount))
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.simpletag(name, attrs + [("value", value)])
@@ -1637,99 +1700,6 @@ class LookupFlag(UShort):
xmlWriter.comment(" ".join(flags))
xmlWriter.newline()
-def _issubclass_namedtuple(x):
- return (
- issubclass(x, tuple)
- and getattr(x, "_fields", None) is not None
- )
-
-
-class _NamedTupleConverter(BaseConverter):
- # subclasses must override this
- tupleClass = NotImplemented
- # List[SimpleValue]
- converterClasses = NotImplemented
-
- def __init__(self, name, repeat, aux, tableClass=None):
- # we expect all converters to be subclasses of SimpleValue
- assert all(issubclass(klass, SimpleValue) for klass in self.converterClasses)
- assert _issubclass_namedtuple(self.tupleClass), repr(self.tupleClass)
- assert len(self.tupleClass._fields) == len(self.converterClasses)
- assert tableClass is None # tableClass is unused by SimplValues
- BaseConverter.__init__(self, name, repeat, aux)
- self.converters = [
- klass(name=name, repeat=None, aux=None)
- for name, klass in zip(self.tupleClass._fields, self.converterClasses)
- ]
- self.convertersByName = {conv.name: conv for conv in self.converters}
- # returned by getRecordSize method
- self.staticSize = sum(c.staticSize for c in self.converters)
-
- def read(self, reader, font, tableDict):
- kwargs = {
- conv.name: conv.read(reader, font, tableDict)
- for conv in self.converters
- }
- return self.tupleClass(**kwargs)
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- for conv in self.converters:
- v = getattr(value, conv.name)
- # repeatIndex is unused for SimpleValues
- conv.write(writer, font, tableDict, v, repeatIndex=None)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- assert value is not None
- defaults = value.__new__.__defaults__ or ()
- assert len(self.converters) >= len(defaults)
- values = {}
- required = object()
- for conv, default in zip_longest(
- reversed(self.converters),
- reversed(defaults),
- fillvalue=required,
- ):
- v = getattr(value, conv.name)
- if default is required or v != default:
- values[conv.name] = conv.toString(v)
- if attrs is None:
- attrs = []
- attrs.extend(
- (conv.name, values[conv.name])
- for conv in self.converters
- if conv.name in values
- )
- xmlWriter.simpletag(name, attrs)
- xmlWriter.newline()
-
- def xmlRead(self, attrs, content, font):
- converters = self.convertersByName
- kwargs = {
- k: converters[k].fromString(v)
- for k, v in attrs.items()
- }
- return self.tupleClass(**kwargs)
-
-
-class VarFixed(_NamedTupleConverter):
- tupleClass = VariableFloat
- converterClasses = [Fixed, ULong]
-
-
-class VarF2Dot14(_NamedTupleConverter):
- tupleClass = VariableFloat
- converterClasses = [F2Dot14, ULong]
-
-
-class VarInt16(_NamedTupleConverter):
- tupleClass = VariableInt
- converterClasses = [Short, ULong]
-
-
-class VarUInt16(_NamedTupleConverter):
- tupleClass = VariableInt
- converterClasses = [UShort, ULong]
-
class _UInt8Enum(UInt8):
enumClass = NotImplemented
@@ -1762,6 +1732,7 @@ converterMapping = {
"uint32": ULong,
"char64": Char64,
"Flags32": Flags32,
+ "VarIndex": VarIndex,
"Version": Version,
"Tag": Tag,
"GlyphID": GlyphID,
@@ -1770,6 +1741,7 @@ converterMapping = {
"DeciPoints": DeciPoints,
"Fixed": Fixed,
"F2Dot14": F2Dot14,
+ "Angle": Angle,
"struct": Struct,
"Offset": Table,
"LOffset": LTable,
@@ -1798,10 +1770,4 @@ converterMapping = {
"OffsetTo": lambda C: partial(Table, tableClass=C),
"LOffsetTo": lambda C: partial(LTable, tableClass=C),
"LOffset24To": lambda C: partial(Table24, tableClass=C),
-
- # Variable types
- "VarFixed": VarFixed,
- "VarF2Dot14": VarF2Dot14,
- "VarInt16": VarInt16,
- "VarUInt16": VarUInt16,
}
diff --git a/Lib/fontTools/ttLib/tables/otData.py b/Lib/fontTools/ttLib/tables/otData.py
index c4294169..dd4033e4 100755
--- a/Lib/fontTools/ttLib/tables/otData.py
+++ b/Lib/fontTools/ttLib/tables/otData.py
@@ -988,6 +988,20 @@ otData = [
('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
]),
+ ('DeltaSetIndexMapFormat0', [
+ ('uint8', 'Format', None, None, 'Format of the DeltaSetIndexMap = 0'),
+ ('uint8', 'EntryFormat', None, None, ''), # Automatically computed
+ ('uint16', 'MappingCount', None, None, ''), # Automatically computed
+ ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
+ ]),
+
+ ('DeltaSetIndexMapFormat1', [
+ ('uint8', 'Format', None, None, 'Format of the DeltaSetIndexMap = 1'),
+ ('uint8', 'EntryFormat', None, None, ''), # Automatically computed
+ ('uint32', 'MappingCount', None, None, ''), # Automatically computed
+ ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
+ ]),
+
# Glyph advance variations
('HVAR', [
@@ -1546,8 +1560,10 @@ otData = [
('LOffset', 'BaseGlyphRecordArray', None, None, 'Offset (from beginning of COLR table) to Base Glyph records.'),
('LOffset', 'LayerRecordArray', None, None, 'Offset (from beginning of COLR table) to Layer Records.'),
('uint16', 'LayerRecordCount', None, None, 'Number of Layer Records.'),
- ('LOffset', 'BaseGlyphV1List', None, 'Version >= 1', 'Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.'),
- ('LOffset', 'LayerV1List', None, 'Version >= 1', 'Offset (from beginning of COLR table) to LayerV1List.'),
+ ('LOffset', 'BaseGlyphList', None, 'Version >= 1', 'Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.'),
+ ('LOffset', 'LayerList', None, 'Version >= 1', 'Offset (from beginning of COLR table) to LayerList.'),
+ ('LOffset', 'ClipList', None, 'Version >= 1', 'Offset to ClipList table (may be NULL)'),
+ ('LOffsetTo(DeltaSetIndexMap)', 'VarIndexMap', None, 'Version >= 1', 'Offset to DeltaSetIndexMap table (may be NULL)'),
('LOffset', 'VarStore', None, 'Version >= 1', 'Offset to variation store (may be NULL)'),
]),
@@ -1570,19 +1586,48 @@ otData = [
('uint16', 'PaletteIndex', None, None, 'Index value to use with a selected color palette.'),
]),
- ('BaseGlyphV1List', [
+ ('BaseGlyphList', [
('uint32', 'BaseGlyphCount', None, None, 'Number of Version-1 Base Glyph records'),
- ('struct', 'BaseGlyphV1Record', 'BaseGlyphCount', 0, 'Array of Version-1 Base Glyph records'),
+ ('struct', 'BaseGlyphPaintRecord', 'BaseGlyphCount', 0, 'Array of Version-1 Base Glyph records'),
]),
- ('BaseGlyphV1Record', [
+ ('BaseGlyphPaintRecord', [
('GlyphID', 'BaseGlyph', None, None, 'Glyph ID of reference glyph.'),
- ('LOffset', 'Paint', None, None, 'Offset (from beginning of BaseGlyphV1Record) to Paint, typically a PaintColrLayers.'),
+ ('LOffset', 'Paint', None, None, 'Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.'),
]),
- ('LayerV1List', [
+ ('LayerList', [
('uint32', 'LayerCount', None, None, 'Number of Version-1 Layers'),
- ('LOffset', 'Paint', 'LayerCount', 0, 'Array of offsets to Paint tables, from the start of the LayerV1List table.'),
+ ('LOffset', 'Paint', 'LayerCount', 0, 'Array of offsets to Paint tables, from the start of the LayerList table.'),
+ ]),
+
+ ('ClipListFormat1', [
+ ('uint8', 'Format', None, None, 'Format for ClipList with 16bit glyph IDs: 1'),
+ ('uint32', 'ClipCount', None, None, 'Number of Clip records.'),
+ ('struct', 'ClipRecord', 'ClipCount', 0, 'Array of Clip records sorted by glyph ID.'),
+ ]),
+
+ ('ClipRecord', [
+ ('uint16', 'StartGlyphID', None, None, 'First glyph ID in the range.'),
+ ('uint16', 'EndGlyphID', None, None, 'Last glyph ID in the range.'),
+ ('Offset24', 'ClipBox', None, None, 'Offset to a ClipBox table.'),
+ ]),
+
+ ('ClipBoxFormat1', [
+ ('uint8', 'Format', None, None, 'Format for ClipBox without variation: set to 1.'),
+ ('int16', 'xMin', None, None, 'Minimum x of clip box.'),
+ ('int16', 'yMin', None, None, 'Minimum y of clip box.'),
+ ('int16', 'xMax', None, None, 'Maximum x of clip box.'),
+ ('int16', 'yMax', None, None, 'Maximum y of clip box.'),
+ ]),
+
+ ('ClipBoxFormat2', [
+ ('uint8', 'Format', None, None, 'Format for variable ClipBox: set to 2.'),
+ ('int16', 'xMin', None, None, 'Minimum x of clip box.'),
+ ('int16', 'yMin', None, None, 'Minimum y of clip box.'),
+ ('int16', 'xMax', None, None, 'Maximum x of clip box.'),
+ ('int16', 'yMax', None, None, 'Maximum y of clip box.'),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
# COLRv1 Affine2x3 uses the same column-major order to serialize a 2D
@@ -1603,30 +1648,25 @@ otData = [
('Fixed', 'dy', None, None, 'Translation in y direction'),
]),
('VarAffine2x3', [
- ('VarFixed', 'xx', None, None, 'x-part of x basis vector'),
- ('VarFixed', 'yx', None, None, 'y-part of x basis vector'),
- ('VarFixed', 'xy', None, None, 'x-part of y basis vector'),
- ('VarFixed', 'yy', None, None, 'y-part of y basis vector'),
- ('VarFixed', 'dx', None, None, 'Translation in x direction'),
- ('VarFixed', 'dy', None, None, 'Translation in y direction'),
- ]),
-
- ('ColorIndex', [
- ('uint16', 'PaletteIndex', None, None, 'Index value to use with a selected color palette.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outsided [0.,1.] reserved'),
- ]),
- ('VarColorIndex', [
- ('uint16', 'PaletteIndex', None, None, 'Index value to use with a selected color palette.'),
- ('VarF2Dot14', 'Alpha', None, None, 'Values outsided [0.,1.] reserved'),
+ ('Fixed', 'xx', None, None, 'x-part of x basis vector'),
+ ('Fixed', 'yx', None, None, 'y-part of x basis vector'),
+ ('Fixed', 'xy', None, None, 'x-part of y basis vector'),
+ ('Fixed', 'yy', None, None, 'y-part of y basis vector'),
+ ('Fixed', 'dx', None, None, 'Translation in x direction'),
+ ('Fixed', 'dy', None, None, 'Translation in y direction'),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
('ColorStop', [
('F2Dot14', 'StopOffset', None, None, ''),
- ('ColorIndex', 'Color', None, None, ''),
+ ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
+ ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved'),
]),
('VarColorStop', [
- ('VarF2Dot14', 'StopOffset', None, None, ''),
- ('VarColorIndex', 'Color', None, None, ''),
+ ('F2Dot14', 'StopOffset', None, None, 'VarIndexBase + 0'),
+ ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
+ ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved. VarIndexBase + 1'),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
('ColorLine', [
@@ -1643,19 +1683,22 @@ otData = [
# PaintColrLayers
('PaintFormat1', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 1'),
- ('uint8', 'NumLayers', None, None, 'Number of offsets to Paint to read from LayerV1List.'),
- ('uint32', 'FirstLayerIndex', None, None, 'Index into LayerV1List.'),
+ ('uint8', 'NumLayers', None, None, 'Number of offsets to Paint to read from LayerList.'),
+ ('uint32', 'FirstLayerIndex', None, None, 'Index into LayerList.'),
]),
# PaintSolid
('PaintFormat2', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 2'),
- ('ColorIndex', 'Color', None, None, 'A solid color paint.'),
+ ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
+ ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved'),
]),
# PaintVarSolid
('PaintFormat3', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 3'),
- ('VarColorIndex', 'Color', None, None, 'A solid color paint.'),
+ ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
+ ('F2Dot14', 'Alpha', None, None, 'Values outside [0.,1.] reserved. VarIndexBase + 0'),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
# PaintLinearGradient
@@ -1673,12 +1716,13 @@ otData = [
('PaintFormat5', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 5'),
('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.'),
- ('VarInt16', 'x0', None, None, ''),
- ('VarInt16', 'y0', None, None, ''),
- ('VarInt16', 'x1', None, None, ''),
- ('VarInt16', 'y1', None, None, ''),
- ('VarInt16', 'x2', None, None, ''),
- ('VarInt16', 'y2', None, None, ''),
+ ('int16', 'x0', None, None, ''),
+ ('int16', 'y0', None, None, ''),
+ ('int16', 'x1', None, None, ''),
+ ('int16', 'y1', None, None, ''),
+ ('int16', 'x2', None, None, ''),
+ ('int16', 'y2', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
# PaintRadialGradient
@@ -1696,12 +1740,13 @@ otData = [
('PaintFormat7', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 7'),
('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.'),
- ('VarInt16', 'x0', None, None, ''),
- ('VarInt16', 'y0', None, None, ''),
- ('VarUInt16', 'r0', None, None, ''),
- ('VarInt16', 'x1', None, None, ''),
- ('VarInt16', 'y1', None, None, ''),
- ('VarUInt16', 'r1', None, None, ''),
+ ('int16', 'x0', None, None, ''),
+ ('int16', 'y0', None, None, ''),
+ ('uint16', 'r0', None, None, ''),
+ ('int16', 'x1', None, None, ''),
+ ('int16', 'y1', None, None, ''),
+ ('uint16', 'r1', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
# PaintSweepGradient
@@ -1710,17 +1755,18 @@ otData = [
('Offset24', 'ColorLine', None, None, 'Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.'),
('int16', 'centerX', None, None, 'Center x coordinate.'),
('int16', 'centerY', None, None, 'Center y coordinate.'),
- ('Fixed', 'startAngle', None, None, 'Start of the angular range of the gradient.'),
- ('Fixed', 'endAngle', None, None, 'End of the angular range of the gradient.'),
+ ('Angle', 'startAngle', None, None, 'Start of the angular range of the gradient.'),
+ ('Angle', 'endAngle', None, None, 'End of the angular range of the gradient.'),
]),
# PaintVarSweepGradient
('PaintFormat9', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 9'),
('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.'),
- ('VarInt16', 'centerX', None, None, 'Center x coordinate.'),
- ('VarInt16', 'centerY', None, None, 'Center y coordinate.'),
- ('VarFixed', 'startAngle', None, None, 'Start of the angular range of the gradient.'),
- ('VarFixed', 'endAngle', None, None, 'End of the angular range of the gradient.'),
+ ('int16', 'centerX', None, None, 'Center x coordinate.'),
+ ('int16', 'centerY', None, None, 'Center y coordinate.'),
+ ('Angle', 'startAngle', None, None, 'Start of the angular range of the gradient.'),
+ ('Angle', 'endAngle', None, None, 'End of the angular range of the gradient.'),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
# PaintGlyph
@@ -1733,76 +1779,177 @@ otData = [
# PaintColrGlyph
('PaintFormat11', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 11'),
- ('GlyphID', 'Glyph', None, None, 'Virtual glyph ID for a BaseGlyphV1List base glyph.'),
+ ('GlyphID', 'Glyph', None, None, 'Virtual glyph ID for a BaseGlyphList base glyph.'),
]),
# PaintTransform
('PaintFormat12', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 12'),
('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintTransform table) to Paint subtable.'),
- ('Affine2x3', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
+ ('LOffset24To(Affine2x3)', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
]),
# PaintVarTransform
('PaintFormat13', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 13'),
('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarTransform table) to Paint subtable.'),
- ('VarAffine2x3', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
+ ('LOffset24To(VarAffine2x3)', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
]),
# PaintTranslate
('PaintFormat14', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 14'),
('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintTranslate table) to Paint subtable.'),
- ('Fixed', 'dx', None, None, 'Translation in x direction.'),
- ('Fixed', 'dy', None, None, 'Translation in y direction.'),
+ ('int16', 'dx', None, None, 'Translation in x direction.'),
+ ('int16', 'dy', None, None, 'Translation in y direction.'),
]),
# PaintVarTranslate
('PaintFormat15', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 15'),
('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarTranslate table) to Paint subtable.'),
- ('VarFixed', 'dx', None, None, 'Translation in x direction.'),
- ('VarFixed', 'dy', None, None, 'Translation in y direction.'),
+ ('int16', 'dx', None, None, 'Translation in x direction.'),
+ ('int16', 'dy', None, None, 'Translation in y direction.'),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
- # PaintRotate
+ # PaintScale
('PaintFormat16', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 16'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintRotate table) to Paint subtable.'),
- ('Fixed', 'angle', None, None, ''),
- ('Fixed', 'centerX', None, None, ''),
- ('Fixed', 'centerY', None, None, ''),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScale table) to Paint subtable.'),
+ ('F2Dot14', 'scaleX', None, None, ''),
+ ('F2Dot14', 'scaleY', None, None, ''),
]),
- # PaintVarRotate
+ # PaintVarScale
('PaintFormat17', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 17'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarRotate table) to Paint subtable.'),
- ('VarFixed', 'angle', None, None, ''),
- ('VarFixed', 'centerX', None, None, ''),
- ('VarFixed', 'centerY', None, None, ''),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScale table) to Paint subtable.'),
+ ('F2Dot14', 'scaleX', None, None, ''),
+ ('F2Dot14', 'scaleY', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
- # PaintSkew
+ # PaintScaleAroundCenter
('PaintFormat18', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 18'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintSkew table) to Paint subtable.'),
- ('Fixed', 'xSkewAngle', None, None, ''),
- ('Fixed', 'ySkewAngle', None, None, ''),
- ('Fixed', 'centerX', None, None, ''),
- ('Fixed', 'centerY', None, None, ''),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.'),
+ ('F2Dot14', 'scaleX', None, None, ''),
+ ('F2Dot14', 'scaleY', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
]),
- # PaintVarSkew
+ # PaintVarScaleAroundCenter
('PaintFormat19', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 19'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarSkew table) to Paint subtable.'),
- ('VarFixed', 'xSkewAngle', None, None, ''),
- ('VarFixed', 'ySkewAngle', None, None, ''),
- ('VarFixed', 'centerX', None, None, ''),
- ('VarFixed', 'centerY', None, None, ''),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleAroundCenter table) to Paint subtable.'),
+ ('F2Dot14', 'scaleX', None, None, ''),
+ ('F2Dot14', 'scaleY', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
]),
- # PaintComposite
+ # PaintScaleUniform
('PaintFormat20', [
('uint8', 'PaintFormat', None, None, 'Format identifier-format = 20'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleUniform table) to Paint subtable.'),
+ ('F2Dot14', 'scale', None, None, ''),
+ ]),
+ # PaintVarScaleUniform
+ ('PaintFormat21', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 21'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.'),
+ ('F2Dot14', 'scale', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
+ ]),
+
+ # PaintScaleUniformAroundCenter
+ ('PaintFormat22', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 22'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.'),
+ ('F2Dot14', 'scale', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
+ ]),
+ # PaintVarScaleUniformAroundCenter
+ ('PaintFormat23', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 23'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.'),
+ ('F2Dot14', 'scale', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
+ ]),
+
+ # PaintRotate
+ ('PaintFormat24', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 24'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintRotate table) to Paint subtable.'),
+ ('Angle', 'angle', None, None, ''),
+ ]),
+ # PaintVarRotate
+ ('PaintFormat25', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 25'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarRotate table) to Paint subtable.'),
+ ('Angle', 'angle', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
+ ]),
+
+ # PaintRotateAroundCenter
+ ('PaintFormat26', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 26'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.'),
+ ('Angle', 'angle', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
+ ]),
+ # PaintVarRotateAroundCenter
+ ('PaintFormat27', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 27'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.'),
+ ('Angle', 'angle', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
+ ]),
+
+ # PaintSkew
+ ('PaintFormat28', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 28'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintSkew table) to Paint subtable.'),
+ ('Angle', 'xSkewAngle', None, None, ''),
+ ('Angle', 'ySkewAngle', None, None, ''),
+ ]),
+ # PaintVarSkew
+ ('PaintFormat29', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 29'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarSkew table) to Paint subtable.'),
+ ('Angle', 'xSkewAngle', None, None, ''),
+ ('Angle', 'ySkewAngle', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
+ ]),
+
+ # PaintSkewAroundCenter
+ ('PaintFormat30', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 30'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.'),
+ ('Angle', 'xSkewAngle', None, None, ''),
+ ('Angle', 'ySkewAngle', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
+ ]),
+ # PaintVarSkewAroundCenter
+ ('PaintFormat31', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 31'),
+ ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.'),
+ ('Angle', 'xSkewAngle', None, None, ''),
+ ('Angle', 'ySkewAngle', None, None, ''),
+ ('int16', 'centerX', None, None, ''),
+ ('int16', 'centerY', None, None, ''),
+ ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
+ ]),
+
+ # PaintComposite
+ ('PaintFormat32', [
+ ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 32'),
('LOffset24To(Paint)', 'SourcePaint', None, None, 'Offset (from beginning of PaintComposite table) to source Paint subtable.'),
('CompositeMode', 'CompositeMode', None, None, 'A CompositeMode enumeration value.'),
('LOffset24To(Paint)', 'BackdropPaint', None, None, 'Offset (from beginning of PaintComposite table) to backdrop Paint subtable.'),
diff --git a/Lib/fontTools/ttLib/tables/otTables.py b/Lib/fontTools/ttLib/tables/otTables.py
index 85befb3b..fbd9db7b 100644
--- a/Lib/fontTools/ttLib/tables/otTables.py
+++ b/Lib/fontTools/ttLib/tables/otTables.py
@@ -5,12 +5,12 @@ OpenType subtables.
Most are constructed upon import from data in otData.py, all are populated with
converter objects from otConverters.py.
"""
+import copy
from enum import IntEnum
import itertools
-from collections import namedtuple
-from fontTools.misc.py23 import bytesjoin
+from collections import defaultdict, namedtuple
from fontTools.misc.roundTools import otRound
-from fontTools.misc.textTools import pad, safeEval
+from fontTools.misc.textTools import bytesjoin, pad, safeEval
from .otBase import (
BaseTable, FormatSwitchingBaseTable, ValueRecord, CountReference,
getFormatSwitchingBaseTableClass,
@@ -425,8 +425,7 @@ class InsertionMorphAction(AATAction):
return []
reader = actionReader.getSubReader(
actionReader.pos + index * 2)
- return [font.getGlyphName(glyphID)
- for glyphID in reader.readUShortArray(count)]
+ return font.getGlyphNameMany(reader.readUShortArray(count))
def toXML(self, xmlWriter, font, attrs, name):
xmlWriter.begintag(name, **attrs)
@@ -521,12 +520,10 @@ class Coverage(FormatSwitchingBaseTable):
def postRead(self, rawTable, font):
if self.Format == 1:
- # TODO only allow glyphs that are valid?
self.glyphs = rawTable["GlyphArray"]
elif self.Format == 2:
glyphs = self.glyphs = []
ranges = rawTable["RangeRecord"]
- glyphOrder = font.getGlyphOrder()
# Some SIL fonts have coverage entries that don't have sorted
# StartCoverageIndex. If it is so, fixup and warn. We undo
# this when writing font out.
@@ -536,25 +533,11 @@ class Coverage(FormatSwitchingBaseTable):
ranges = sorted_ranges
del sorted_ranges
for r in ranges:
- assert r.StartCoverageIndex == len(glyphs), \
- (r.StartCoverageIndex, len(glyphs))
start = r.Start
end = r.End
- try:
- startID = font.getGlyphID(start, requireReal=True)
- except KeyError:
- log.warning("Coverage table has start glyph ID out of range: %s.", start)
- continue
- try:
- endID = font.getGlyphID(end, requireReal=True) + 1
- except KeyError:
- # Apparently some tools use 65535 to "match all" the range
- if end != 'glyph65535':
- log.warning("Coverage table has end glyph ID out of range: %s.", end)
- # NOTE: We clobber out-of-range things here. There are legit uses for those,
- # but none that we have seen in the wild.
- endID = len(glyphOrder)
- glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID))
+ startID = font.getGlyphID(start)
+ endID = font.getGlyphID(end) + 1
+ glyphs.extend(font.getGlyphNameMany(range(startID, endID)))
else:
self.glyphs = []
log.warning("Unknown Coverage format: %s", self.Format)
@@ -566,10 +549,9 @@ class Coverage(FormatSwitchingBaseTable):
glyphs = self.glyphs = []
format = 1
rawTable = {"GlyphArray": glyphs}
- getGlyphID = font.getGlyphID
if glyphs:
# find out whether Format 2 is more compact or not
- glyphIDs = [getGlyphID(glyphName) for glyphName in glyphs ]
+ glyphIDs = font.getGlyphIDMany(glyphs)
brokenOrder = sorted(glyphIDs) != glyphIDs
last = glyphIDs[0]
@@ -618,32 +600,18 @@ class Coverage(FormatSwitchingBaseTable):
glyphs.append(attrs["value"])
-class VarIdxMap(BaseTable):
+class DeltaSetIndexMap(getFormatSwitchingBaseTableClass("uint8")):
def populateDefaults(self, propagator=None):
if not hasattr(self, 'mapping'):
- self.mapping = {}
+ self.mapping = []
def postRead(self, rawTable, font):
assert (rawTable['EntryFormat'] & 0xFFC0) == 0
- glyphOrder = font.getGlyphOrder()
- mapList = rawTable['mapping']
- mapList.extend([mapList[-1]] * (len(glyphOrder) - len(mapList)))
- self.mapping = dict(zip(glyphOrder, mapList))
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = {}
-
- glyphOrder = font.getGlyphOrder()
- mapping = [mapping[g] for g in glyphOrder]
- while len(mapping) > 1 and mapping[-2] == mapping[-1]:
- del mapping[-1]
-
- rawTable = { 'mapping': mapping }
- rawTable['MappingCount'] = len(mapping)
+ self.mapping = rawTable['mapping']
+ @staticmethod
+ def getEntryFormat(mapping):
ored = 0
for idx in mapping:
ored |= idx
@@ -666,9 +634,65 @@ class VarIdxMap(BaseTable):
else:
entrySize = 4
- entryFormat = ((entrySize - 1) << 4) | (innerBits - 1)
+ return ((entrySize - 1) << 4) | (innerBits - 1)
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = []
+ self.Format = 1 if len(mapping) > 0xFFFF else 0
+ rawTable = self.__dict__.copy()
+ rawTable['MappingCount'] = len(mapping)
+ rawTable['EntryFormat'] = self.getEntryFormat(mapping)
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ for i, value in enumerate(getattr(self, "mapping", [])):
+ attrs = (
+ ('index', i),
+ ('outer', value >> 16),
+ ('inner', value & 0xFFFF),
+ )
+ xmlWriter.simpletag("Map", attrs)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ self.mapping = mapping = []
+ index = safeEval(attrs['index'])
+ outer = safeEval(attrs['outer'])
+ inner = safeEval(attrs['inner'])
+ assert inner <= 0xFFFF
+ mapping.insert(index, (outer << 16) | inner)
+
+
+class VarIdxMap(BaseTable):
+
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, 'mapping'):
+ self.mapping = {}
+
+ def postRead(self, rawTable, font):
+ assert (rawTable['EntryFormat'] & 0xFFC0) == 0
+ glyphOrder = font.getGlyphOrder()
+ mapList = rawTable['mapping']
+ mapList.extend([mapList[-1]] * (len(glyphOrder) - len(mapList)))
+ self.mapping = dict(zip(glyphOrder, mapList))
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = {}
+
+ glyphOrder = font.getGlyphOrder()
+ mapping = [mapping[g] for g in glyphOrder]
+ while len(mapping) > 1 and mapping[-2] == mapping[-1]:
+ del mapping[-1]
- rawTable['EntryFormat'] = entryFormat
+ rawTable = {'mapping': mapping}
+ rawTable['MappingCount'] = len(mapping)
+ rawTable['EntryFormat'] = DeltaSetIndexMap.getEntryFormat(mapping)
return rawTable
def toXML2(self, xmlWriter, font):
@@ -726,9 +750,9 @@ class SingleSubst(FormatSwitchingBaseTable):
input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
if self.Format == 1:
delta = rawTable["DeltaGlyphID"]
- inputGIDS = [ font.getGlyphID(name) for name in input ]
+ inputGIDS = font.getGlyphIDMany(input)
outGIDS = [ (glyphID + delta) % 65536 for glyphID in inputGIDS ]
- outNames = [ font.getGlyphName(glyphID) for glyphID in outGIDS ]
+ outNames = font.getGlyphNameMany(outGIDS)
for inp, out in zip(input, outNames):
mapping[inp] = out
elif self.Format == 2:
@@ -882,51 +906,30 @@ class ClassDef(FormatSwitchingBaseTable):
def postRead(self, rawTable, font):
classDefs = {}
- glyphOrder = font.getGlyphOrder()
if self.Format == 1:
start = rawTable["StartGlyph"]
classList = rawTable["ClassValueArray"]
- try:
- startID = font.getGlyphID(start, requireReal=True)
- except KeyError:
- log.warning("ClassDef table has start glyph ID out of range: %s.", start)
- startID = len(glyphOrder)
+ startID = font.getGlyphID(start)
endID = startID + len(classList)
- if endID > len(glyphOrder):
- log.warning("ClassDef table has entries for out of range glyph IDs: %s,%s.",
- start, len(classList))
- # NOTE: We clobber out-of-range things here. There are legit uses for those,
- # but none that we have seen in the wild.
- endID = len(glyphOrder)
-
- for glyphID, cls in zip(range(startID, endID), classList):
+ glyphNames = font.getGlyphNameMany(range(startID, endID))
+ for glyphName, cls in zip(glyphNames, classList):
if cls:
- classDefs[glyphOrder[glyphID]] = cls
+ classDefs[glyphName] = cls
elif self.Format == 2:
records = rawTable["ClassRangeRecord"]
for rec in records:
- start = rec.Start
- end = rec.End
cls = rec.Class
- try:
- startID = font.getGlyphID(start, requireReal=True)
- except KeyError:
- log.warning("ClassDef table has start glyph ID out of range: %s.", start)
+ if not cls:
continue
- try:
- endID = font.getGlyphID(end, requireReal=True) + 1
- except KeyError:
- # Apparently some tools use 65535 to "match all" the range
- if end != 'glyph65535':
- log.warning("ClassDef table has end glyph ID out of range: %s.", end)
- # NOTE: We clobber out-of-range things here. There are legit uses for those,
- # but none that we have seen in the wild.
- endID = len(glyphOrder)
- for glyphID in range(startID, endID):
- if cls:
- classDefs[glyphOrder[glyphID]] = cls
+ start = rec.Start
+ end = rec.End
+ startID = font.getGlyphID(start)
+ endID = font.getGlyphID(end) + 1
+ glyphNames = font.getGlyphNameMany(range(startID, endID))
+ for glyphName in glyphNames:
+ classDefs[glyphName] = cls
else:
log.warning("Unknown ClassDef format: %s", self.Format)
self.classDefs = classDefs
@@ -1179,7 +1182,6 @@ class COLR(BaseTable):
if conv.name != "LayerRecordCount":
subReader.advance(conv.staticSize)
continue
- conv = self.getConverterByName("LayerRecordCount")
reader[conv.name] = conv.read(subReader, font, tableDict={})
break
else:
@@ -1245,51 +1247,176 @@ class BaseGlyphRecordArray(BaseTable):
return self.__dict__.copy()
-class BaseGlyphV1List(BaseTable):
+class BaseGlyphList(BaseTable):
def preWrite(self, font):
- self.BaseGlyphV1Record = sorted(
- self.BaseGlyphV1Record,
+ self.BaseGlyphPaintRecord = sorted(
+ self.BaseGlyphPaintRecord,
key=lambda rec: font.getGlyphID(rec.BaseGlyph)
)
return self.__dict__.copy()
+class ClipBox(getFormatSwitchingBaseTableClass("uint8")):
-class VariableValue(namedtuple("VariableValue", ["value", "varIdx"])):
- __slots__ = ()
+ def as_tuple(self):
+ return tuple(getattr(self, conv.name) for conv in self.getConverters())
- _value_mapper = None
+ def __repr__(self):
+ return f"{self.__class__.__name__}{self.as_tuple()}"
- def __new__(cls, value, varIdx=0):
- return super().__new__(
- cls,
- cls._value_mapper(value) if cls._value_mapper else value,
- varIdx
- )
- @classmethod
- def _make(cls, iterable):
- if cls._value_mapper:
- it = iter(iterable)
- try:
- value = next(it)
- except StopIteration:
- pass
- else:
- value = cls._value_mapper(value)
- iterable = itertools.chain((value,), it)
- return super()._make(iterable)
+class ClipList(getFormatSwitchingBaseTableClass("uint8")):
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "clips"):
+ self.clips = {}
-class VariableFloat(VariableValue):
- __slots__ = ()
- _value_mapper = float
+ def postRead(self, rawTable, font):
+ clips = {}
+ glyphOrder = font.getGlyphOrder()
+ for i, rec in enumerate(rawTable["ClipRecord"]):
+ if rec.StartGlyphID > rec.EndGlyphID:
+ log.warning(
+ "invalid ClipRecord[%i].StartGlyphID (%i) > "
+ "EndGlyphID (%i); skipped",
+ i,
+ rec.StartGlyphID,
+ rec.EndGlyphID,
+ )
+ continue
+ redefinedGlyphs = []
+ missingGlyphs = []
+ for glyphID in range(rec.StartGlyphID, rec.EndGlyphID + 1):
+ try:
+ glyph = glyphOrder[glyphID]
+ except IndexError:
+ missingGlyphs.append(glyphID)
+ continue
+ if glyph not in clips:
+ clips[glyph] = copy.copy(rec.ClipBox)
+ else:
+ redefinedGlyphs.append(glyphID)
+ if redefinedGlyphs:
+ log.warning(
+ "ClipRecord[%i] overlaps previous records; "
+ "ignoring redefined clip boxes for the "
+ "following glyph ID range: [%i-%i]",
+ i,
+ min(redefinedGlyphs),
+ max(redefinedGlyphs),
+ )
+ if missingGlyphs:
+ log.warning(
+ "ClipRecord[%i] range references missing "
+ "glyph IDs: [%i-%i]",
+ i,
+ min(missingGlyphs),
+ max(missingGlyphs),
+ )
+ self.clips = clips
+
+ def groups(self):
+ glyphsByClip = defaultdict(list)
+ uniqueClips = {}
+ for glyphName, clipBox in self.clips.items():
+ key = clipBox.as_tuple()
+ glyphsByClip[key].append(glyphName)
+ if key not in uniqueClips:
+ uniqueClips[key] = clipBox
+ return {
+ frozenset(glyphs): uniqueClips[key]
+ for key, glyphs in glyphsByClip.items()
+ }
+
+ def preWrite(self, font):
+ if not hasattr(self, "clips"):
+ self.clips = {}
+ clipBoxRanges = {}
+ glyphMap = font.getReverseGlyphMap()
+ for glyphs, clipBox in self.groups().items():
+ glyphIDs = sorted(
+ glyphMap[glyphName] for glyphName in glyphs
+ if glyphName in glyphMap
+ )
+ if not glyphIDs:
+ continue
+ last = glyphIDs[0]
+ ranges = [[last]]
+ for glyphID in glyphIDs[1:]:
+ if glyphID != last + 1:
+ ranges[-1].append(last)
+ ranges.append([glyphID])
+ last = glyphID
+ ranges[-1].append(last)
+ for start, end in ranges:
+ assert (start, end) not in clipBoxRanges
+ clipBoxRanges[(start, end)] = clipBox
+
+ clipRecords = []
+ for (start, end), clipBox in sorted(clipBoxRanges.items()):
+ record = ClipRecord()
+ record.StartGlyphID = start
+ record.EndGlyphID = end
+ record.ClipBox = clipBox
+ clipRecords.append(record)
+ rawTable = {
+ "ClipCount": len(clipRecords),
+ "ClipRecord": clipRecords,
+ }
+ return rawTable
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ tableName = name if name else self.__class__.__name__
+ if attrs is None:
+ attrs = []
+ if hasattr(self, "Format"):
+ attrs.append(("Format", self.Format))
+ xmlWriter.begintag(tableName, attrs)
+ xmlWriter.newline()
+ # sort clips alphabetically to ensure deterministic XML dump
+ for glyphs, clipBox in sorted(
+ self.groups().items(), key=lambda item: min(item[0])
+ ):
+ xmlWriter.begintag("Clip")
+ xmlWriter.newline()
+ for glyphName in sorted(glyphs):
+ xmlWriter.simpletag("Glyph", value=glyphName)
+ xmlWriter.newline()
+ xmlWriter.begintag("ClipBox", [("Format", clipBox.Format)])
+ xmlWriter.newline()
+ clipBox.toXML2(xmlWriter, font)
+ xmlWriter.endtag("ClipBox")
+ xmlWriter.newline()
+ xmlWriter.endtag("Clip")
+ xmlWriter.newline()
+ xmlWriter.endtag(tableName)
+ xmlWriter.newline()
-class VariableInt(VariableValue):
- __slots__ = ()
- _value_mapper = otRound
+ def fromXML(self, name, attrs, content, font):
+ clips = getattr(self, "clips", None)
+ if clips is None:
+ self.clips = clips = {}
+ assert name == "Clip"
+ glyphs = []
+ clipBox = None
+ for elem in content:
+ if not isinstance(elem, tuple):
+ continue
+ name, attrs, content = elem
+ if name == "Glyph":
+ glyphs.append(attrs["value"])
+ elif name == "ClipBox":
+ clipBox = ClipBox()
+ clipBox.Format = safeEval(attrs["Format"])
+ for elem in content:
+ if not isinstance(elem, tuple):
+ continue
+ name, attrs, content = elem
+ clipBox.fromXML(name, attrs, content, font)
+ if clipBox:
+ for glyphName in glyphs:
+ clips[glyphName] = clipBox
class ExtendMode(IntEnum):
@@ -1313,21 +1440,22 @@ class CompositeMode(IntEnum):
SRC_ATOP = 9
DEST_ATOP = 10
XOR = 11
- SCREEN = 12
- OVERLAY = 13
- DARKEN = 14
- LIGHTEN = 15
- COLOR_DODGE = 16
- COLOR_BURN = 17
- HARD_LIGHT = 18
- SOFT_LIGHT = 19
- DIFFERENCE = 20
- EXCLUSION = 21
- MULTIPLY = 22
- HSL_HUE = 23
- HSL_SATURATION = 24
- HSL_COLOR = 25
- HSL_LUMINOSITY = 26
+ PLUS = 12
+ SCREEN = 13
+ OVERLAY = 14
+ DARKEN = 15
+ LIGHTEN = 16
+ COLOR_DODGE = 17
+ COLOR_BURN = 18
+ HARD_LIGHT = 19
+ SOFT_LIGHT = 20
+ DIFFERENCE = 21
+ EXCLUSION = 22
+ MULTIPLY = 23
+ HSL_HUE = 24
+ HSL_SATURATION = 25
+ HSL_COLOR = 26
+ HSL_LUMINOSITY = 27
class PaintFormat(IntEnum):
@@ -1346,11 +1474,23 @@ class PaintFormat(IntEnum):
PaintVarTransform = 13
PaintTranslate = 14
PaintVarTranslate = 15
- PaintRotate = 16
- PaintVarRotate = 17
- PaintSkew = 18
- PaintVarSkew = 19
- PaintComposite = 20
+ PaintScale = 16
+ PaintVarScale = 17
+ PaintScaleAroundCenter = 18
+ PaintVarScaleAroundCenter = 19
+ PaintScaleUniform = 20
+ PaintVarScaleUniform = 21
+ PaintScaleUniformAroundCenter = 22
+ PaintVarScaleUniformAroundCenter = 23
+ PaintRotate = 24
+ PaintVarRotate = 25
+ PaintRotateAroundCenter = 26
+ PaintVarRotateAroundCenter = 27
+ PaintSkew = 28
+ PaintVarSkew = 29
+ PaintSkewAroundCenter = 30
+ PaintVarSkewAroundCenter = 31
+ PaintComposite = 32
class Paint(getFormatSwitchingBaseTableClass("uint8")):
@@ -1375,16 +1515,20 @@ class Paint(getFormatSwitchingBaseTableClass("uint8")):
def getChildren(self, colr):
if self.Format == PaintFormat.PaintColrLayers:
- return colr.LayerV1List.Paint[
+ # https://github.com/fonttools/fonttools/issues/2438: don't die when no LayerList exists
+ layers = []
+ if colr.LayerList is not None:
+ layers = colr.LayerList.Paint
+ return layers[
self.FirstLayerIndex : self.FirstLayerIndex + self.NumLayers
]
if self.Format == PaintFormat.PaintColrGlyph:
- for record in colr.BaseGlyphV1List.BaseGlyphV1Record:
+ for record in colr.BaseGlyphList.BaseGlyphPaintRecord:
if record.BaseGlyph == self.Glyph:
return [record.Paint]
else:
- raise KeyError(f"{self.Glyph!r} not in colr.BaseGlyphV1List")
+ raise KeyError(f"{self.Glyph!r} not in colr.BaseGlyphList")
children = []
for conv in self.getConverters():
@@ -1490,20 +1634,22 @@ def fixLookupOverFlows(ttf, overflowRecord):
return ok
lookup = lookups[lookupIndex]
- lookup.LookupType = extType
- for si in range(len(lookup.SubTable)):
- subTable = lookup.SubTable[si]
- extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
- extSubTable = extSubTableClass()
- extSubTable.Format = 1
- extSubTable.ExtSubTable = subTable
- lookup.SubTable[si] = extSubTable
+ for lookupIndex in range(lookupIndex, len(lookups)):
+ lookup = lookups[lookupIndex]
+ if lookup.LookupType != extType:
+ lookup.LookupType = extType
+ for si in range(len(lookup.SubTable)):
+ subTable = lookup.SubTable[si]
+ extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
+ extSubTable = extSubTableClass()
+ extSubTable.Format = 1
+ extSubTable.ExtSubTable = subTable
+ lookup.SubTable[si] = extSubTable
ok = 1
return ok
def splitMultipleSubst(oldSubTable, newSubTable, overflowRecord):
ok = 1
- newSubTable.Format = oldSubTable.Format
oldMapping = sorted(oldSubTable.mapping.items())
oldLen = len(oldMapping)
@@ -1529,7 +1675,6 @@ def splitMultipleSubst(oldSubTable, newSubTable, overflowRecord):
def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord):
ok = 1
- newSubTable.Format = oldSubTable.Format
if hasattr(oldSubTable, 'sortCoverageLast'):
newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast
@@ -1559,7 +1704,6 @@ def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord):
def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord):
ok = 1
- newSubTable.Format = oldSubTable.Format
oldLigs = sorted(oldSubTable.ligatures.items())
oldLen = len(oldLigs)
diff --git a/Lib/fontTools/ttLib/tables/ttProgram.py b/Lib/fontTools/ttLib/tables/ttProgram.py
index a1dfa3c5..72377583 100644
--- a/Lib/fontTools/ttLib/tables/ttProgram.py
+++ b/Lib/fontTools/ttLib/tables/ttProgram.py
@@ -1,7 +1,6 @@
"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs."""
-from fontTools.misc.py23 import strjoin
-from fontTools.misc.textTools import num2binary, binary2num, readHex
+from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
import array
from io import StringIO
import re
diff --git a/Lib/fontTools/ttLib/ttCollection.py b/Lib/fontTools/ttLib/ttCollection.py
index 3db4c8cd..f0922127 100644
--- a/Lib/fontTools/ttLib/ttCollection.py
+++ b/Lib/fontTools/ttLib/ttCollection.py
@@ -26,8 +26,10 @@ class TTCollection(object):
assert 'fontNumber' not in kwargs, kwargs
+ closeStream = False
if not hasattr(file, "read"):
file = open(file, "rb")
+ closeStream = True
tableCache = {} if shareTables else None
@@ -35,13 +37,21 @@ class TTCollection(object):
for i in range(header.numFonts):
font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs)
fonts.append(font)
-
+
+ # don't close file if lazy=True, as the TTFont hold a reference to the original
+ # file; the file will be closed once the TTFonts are closed in the
+ # TTCollection.close(). We still want to close the file if lazy is None or
+ # False, because in that case the TTFont no longer need the original file
+ # and we want to avoid 'ResourceWarning: unclosed file'.
+ if not kwargs.get("lazy") and closeStream:
+ file.close()
+
def __enter__(self):
return self
-
+
def __exit__(self, type, value, traceback):
self.close()
-
+
def close(self):
for font in self.fonts:
font.close()
@@ -76,7 +86,7 @@ class TTCollection(object):
final.write(file.getvalue())
file.close()
- def saveXML(self, fileOrPath, newlinestr=None, writeVersion=True, **kwargs):
+ def saveXML(self, fileOrPath, newlinestr="\n", writeVersion=True, **kwargs):
from fontTools.misc import xmlWriter
writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
diff --git a/Lib/fontTools/ttLib/ttFont.py b/Lib/fontTools/ttLib/ttFont.py
index 41a48751..3929e2f3 100644
--- a/Lib/fontTools/ttLib/ttFont.py
+++ b/Lib/fontTools/ttLib/ttFont.py
@@ -1,5 +1,5 @@
from fontTools.misc import xmlWriter
-from fontTools.misc.py23 import Tag, byteord, tostr
+from fontTools.misc.textTools import Tag, byteord, tostr
from fontTools.misc.loggingTools import deprecateArgument
from fontTools.ttLib import TTLibError
from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter
@@ -12,75 +12,84 @@ log = logging.getLogger(__name__)
class TTFont(object):
- """The main font object. It manages file input and output, and offers
- a convenient way of accessing tables.
- Tables will be only decompiled when necessary, ie. when they're actually
- accessed. This means that simple operations can be extremely fast.
+ """Represents a TrueType font.
+
+ The object manages file input and output, and offers a convenient way of
+ accessing tables. Tables will be only decompiled when necessary, ie. when
+ they're actually accessed. This means that simple operations can be extremely fast.
+
+ Example usage::
+
+ >> from fontTools import ttLib
+ >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file
+ >> tt['maxp'].numGlyphs
+ 242
+ >> tt['OS/2'].achVendID
+ 'B&H\000'
+ >> tt['head'].unitsPerEm
+ 2048
+
+ For details of the objects returned when accessing each table, see :ref:`tables`.
+ To add a table to the font, use the :py:func:`newTable` function::
+
+ >> os2 = newTable("OS/2")
+ >> os2.version = 4
+ >> # set other attributes
+ >> font["OS/2"] = os2
+
+ TrueType fonts can also be serialized to and from XML format (see also the
+ :ref:`ttx` binary)::
+
+ >> tt.saveXML("afont.ttx")
+ Dumping 'LTSH' table...
+ Dumping 'OS/2' table...
+ [...]
+
+ >> tt2 = ttLib.TTFont() # Create a new font object
+ >> tt2.importXML("afont.ttx")
+ >> tt2['maxp'].numGlyphs
+ 242
+
+ The TTFont object may be used as a context manager; this will cause the file
+ reader to be closed after the context ``with`` block is exited::
+
+ with TTFont(filename) as f:
+ # Do stuff
+
+ Args:
+ file: When reading a font from disk, either a pathname pointing to a file,
+ or a readable file object.
+ res_name_or_index: If running on a Macintosh, either a sfnt resource name or
+ an sfnt resource index number. If the index number is zero, TTLib will
+ autodetect whether the file is a flat file or a suitcase. (If it is a suitcase,
+ only the first 'sfnt' resource will be read.)
+ sfntVersion (str): When constructing a font object from scratch, sets the four-byte
+ sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create
+ an OpenType file, use ``OTTO``.
+ flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2
+ file.
+ checkChecksums (int): How checksum data should be treated. Default is 0
+ (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to
+ raise an exception if any wrong checksums are found.
+ recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``,
+ ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save.
+ Also compiles the glyphs on importing, which saves memory consumption and
+ time.
+ ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation
+ will be ignored, and the binary data will be returned for those tables instead.
+ recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in
+ the ``head`` table on save.
+ fontNumber (int): The index of the font in a TrueType Collection file.
+ lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon
+ access only. If it is set to False, many data structures are loaded immediately.
+ The default is ``lazy=None`` which is somewhere in between.
"""
def __init__(self, file=None, res_name_or_index=None,
sfntVersion="\000\001\000\000", flavor=None, checkChecksums=0,
- verbose=None, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False,
+ verbose=None, recalcBBoxes=True, allowVID=NotImplemented, ignoreDecompileErrors=False,
recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None,
_tableCache=None):
-
- """The constructor can be called with a few different arguments.
- When reading a font from disk, 'file' should be either a pathname
- pointing to a file, or a readable file object.
-
- It we're running on a Macintosh, 'res_name_or_index' maybe an sfnt
- resource name or an sfnt resource index number or zero. The latter
- case will cause TTLib to autodetect whether the file is a flat file
- or a suitcase. (If it's a suitcase, only the first 'sfnt' resource
- will be read!)
-
- The 'checkChecksums' argument is used to specify how sfnt
- checksums are treated upon reading a file from disk:
- 0: don't check (default)
- 1: check, print warnings if a wrong checksum is found
- 2: check, raise an exception if a wrong checksum is found.
-
- The TTFont constructor can also be called without a 'file'
- argument: this is the way to create a new empty font.
- In this case you can optionally supply the 'sfntVersion' argument,
- and a 'flavor' which can be None, 'woff', or 'woff2'.
-
- If the recalcBBoxes argument is false, a number of things will *not*
- be recalculated upon save/compile:
- 1) 'glyf' glyph bounding boxes
- 2) 'CFF ' font bounding box
- 3) 'head' font bounding box
- 4) 'hhea' min/max values
- 5) 'vhea' min/max values
- (1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-).
- Additionally, upon importing an TTX file, this option cause glyphs
- to be compiled right away. This should reduce memory consumption
- greatly, and therefore should have some impact on the time needed
- to parse/compile large fonts.
-
- If the recalcTimestamp argument is false, the modified timestamp in the
- 'head' table will *not* be recalculated upon save/compile.
-
- If the allowVID argument is set to true, then virtual GID's are
- supported. Asking for a glyph ID with a glyph name or GID that is not in
- the font will return a virtual GID. This is valid for GSUB and cmap
- tables. For SING glyphlets, the cmap table is used to specify Unicode
- values for virtual GI's used in GSUB/GPOS rules. If the gid N is requested
- and does not exist in the font, or the glyphname has the form glyphN
- and does not exist in the font, then N is used as the virtual GID.
- Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new
- virtual GIDs, the next is one less than the previous.
-
- If ignoreDecompileErrors is set to True, exceptions raised in
- individual tables during decompilation will be ignored, falling
- back to the DefaultTable implementation, which simply keeps the
- binary data.
-
- If lazy is set to True, many data structures are loaded lazily, upon
- access only. If it is set to False, many data structures are loaded
- immediately. The default is lazy=None which is somewhere in between.
- """
-
for name in ("verbose", "quiet"):
val = locals().get(name)
if val is not None:
@@ -92,12 +101,6 @@ class TTFont(object):
self.recalcTimestamp = recalcTimestamp
self.tables = {}
self.reader = None
-
- # Permit the user to reference glyphs that are not int the font.
- self.last_vid = 0xFFFE # Can't make it be 0xFFFF, as the world is full unsigned short integer counters that get incremented after the last seen GID value.
- self.reverseVIDDict = {}
- self.VIDDict = {}
- self.allowVID = allowVID
self.ignoreDecompileErrors = ignoreDecompileErrors
if not file:
@@ -154,9 +157,15 @@ class TTFont(object):
self.reader.close()
def save(self, file, reorderTables=True):
- """Save the font to disk. Similarly to the constructor,
- the 'file' argument can be either a pathname or a writable
- file object.
+ """Save the font to disk.
+
+ Args:
+ file: Similarly to the constructor, can be either a pathname or a writable
+ file object.
+ reorderTables (Option[bool]): If true (the default), reorder the tables,
+ sorting them by tag (recommended by the OpenType specification). If
+ false, retain the original font order. If None, reorder by table
+ dependency (fastest).
"""
if not hasattr(file, "write"):
if self.lazy and self.reader.file.name == file:
@@ -215,7 +224,7 @@ class TTFont(object):
return writer.reordersTables()
- def saveXML(self, fileOrPath, newlinestr=None, **kwargs):
+ def saveXML(self, fileOrPath, newlinestr="\n", **kwargs):
"""Export the font as TTX (an XML-based text file), or as a series of text
files when splitTables is true. In the latter case, the 'fileOrPath'
argument should be a path to a directory.
@@ -336,11 +345,15 @@ class TTFont(object):
reader.read()
def isLoaded(self, tag):
- """Return true if the table identified by 'tag' has been
+ """Return true if the table identified by ``tag`` has been
decompiled and loaded into memory."""
return tag in self.tables
def has_key(self, tag):
+ """Test if the table identified by ``tag`` is present in the font.
+
+ As well as this method, ``tag in font`` can also be used to determine the
+ presence of the table."""
if self.isLoaded(tag):
return True
elif self.reader and tag in self.reader:
@@ -353,6 +366,7 @@ class TTFont(object):
__contains__ = has_key
def keys(self):
+ """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table."""
keys = list(self.tables.keys())
if self.reader:
for key in list(self.reader.keys()):
@@ -364,6 +378,14 @@ class TTFont(object):
keys = sortedTagList(keys)
return ["GlyphOrder"] + keys
+ def ensureDecompiled(self):
+ """Decompile all the tables, even if a TTFont was opened in 'lazy' mode."""
+ for tag in self.keys():
+ table = self[tag]
+ if self.lazy is not False and hasattr(table, "ensureDecompiled"):
+ table.ensureDecompiled()
+ self.lazy = False
+
def __len__(self):
return len(list(self.keys()))
@@ -422,15 +444,26 @@ class TTFont(object):
del self.reader[tag]
def get(self, tag, default=None):
+ """Returns the table if it exists or (optionally) a default if it doesn't."""
try:
return self[tag]
except KeyError:
return default
def setGlyphOrder(self, glyphOrder):
+ """Set the glyph order
+
+ Args:
+ glyphOrder ([str]): List of glyph names in order.
+ """
self.glyphOrder = glyphOrder
+ if hasattr(self, '_reverseGlyphOrderDict'):
+ del self._reverseGlyphOrderDict
+ if self.isLoaded("glyf"):
+ self["glyf"].setGlyphOrder(glyphOrder)
def getGlyphOrder(self):
+ """Returns a list of glyph names ordered by their position in the font."""
try:
return self.glyphOrder
except AttributeError:
@@ -544,78 +577,55 @@ class TTFont(object):
from fontTools.misc import textTools
return textTools.caselessSort(self.getGlyphOrder())
- def getGlyphName(self, glyphID, requireReal=False):
+ def getGlyphName(self, glyphID):
+ """Returns the name for the glyph with the given ID.
+
+ If no name is available, synthesises one with the form ``glyphXXXXX``` where
+ ```XXXXX`` is the zero-padded glyph ID.
+ """
try:
return self.getGlyphOrder()[glyphID]
except IndexError:
- if requireReal or not self.allowVID:
- # XXX The ??.W8.otf font that ships with OSX uses higher glyphIDs in
- # the cmap table than there are glyphs. I don't think it's legal...
- return "glyph%.5d" % glyphID
- else:
- # user intends virtual GID support
+ return "glyph%.5d" % glyphID
+
+ def getGlyphNameMany(self, lst):
+ """Converts a list of glyph IDs into a list of glyph names."""
+ glyphOrder = self.getGlyphOrder();
+ cnt = len(glyphOrder)
+ return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid
+ for gid in lst]
+
+ def getGlyphID(self, glyphName):
+ """Returns the ID of the glyph with the given name."""
+ try:
+ return self.getReverseGlyphMap()[glyphName]
+ except KeyError:
+ if glyphName[:5] == "glyph":
try:
- glyphName = self.VIDDict[glyphID]
- except KeyError:
- glyphName ="glyph%.5d" % glyphID
- self.last_vid = min(glyphID, self.last_vid )
- self.reverseVIDDict[glyphName] = glyphID
- self.VIDDict[glyphID] = glyphName
- return glyphName
-
- def getGlyphID(self, glyphName, requireReal=False):
- if not hasattr(self, "_reverseGlyphOrderDict"):
- self._buildReverseGlyphOrderDict()
- glyphOrder = self.getGlyphOrder()
- d = self._reverseGlyphOrderDict
- if glyphName not in d:
- if glyphName in glyphOrder:
- self._buildReverseGlyphOrderDict()
- return self.getGlyphID(glyphName)
- else:
- if requireReal:
+ return int(glyphName[5:])
+ except (NameError, ValueError):
raise KeyError(glyphName)
- elif not self.allowVID:
- # Handle glyphXXX only
- if glyphName[:5] == "glyph":
- try:
- return int(glyphName[5:])
- except (NameError, ValueError):
- raise KeyError(glyphName)
- else:
- # user intends virtual GID support
- try:
- glyphID = self.reverseVIDDict[glyphName]
- except KeyError:
- # if name is in glyphXXX format, use the specified name.
- if glyphName[:5] == "glyph":
- try:
- glyphID = int(glyphName[5:])
- except (NameError, ValueError):
- glyphID = None
- if glyphID is None:
- glyphID = self.last_vid -1
- self.last_vid = glyphID
- self.reverseVIDDict[glyphName] = glyphID
- self.VIDDict[glyphID] = glyphName
- return glyphID
-
- glyphID = d[glyphName]
- if glyphName != glyphOrder[glyphID]:
- self._buildReverseGlyphOrderDict()
- return self.getGlyphID(glyphName)
- return glyphID
+
+ def getGlyphIDMany(self, lst):
+ """Converts a list of glyph names into a list of glyph IDs."""
+ d = self.getReverseGlyphMap()
+ try:
+ return [d[glyphName] for glyphName in lst]
+ except KeyError:
+ getGlyphID = self.getGlyphID
+ return [getGlyphID(glyphName) for glyphName in lst]
def getReverseGlyphMap(self, rebuild=False):
+ """Returns a mapping of glyph names to glyph IDs."""
if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
self._buildReverseGlyphOrderDict()
return self._reverseGlyphOrderDict
def _buildReverseGlyphOrderDict(self):
self._reverseGlyphOrderDict = d = {}
- glyphOrder = self.getGlyphOrder()
- for glyphID in range(len(glyphOrder)):
- d[glyphOrder[glyphID]] = glyphID
+ for glyphID,glyphName in enumerate(self.getGlyphOrder()):
+ d[glyphName] = glyphID
+ return d
def _writeTable(self, tag, writer, done, tableCache=None):
"""Internal helper function for self.save(). Keeps track of
@@ -644,7 +654,11 @@ class TTFont(object):
tableCache[(Tag(tag), tabledata)] = writer[tag]
def getTableData(self, tag):
- """Returns raw table data, whether compiled or directly read from disk.
+ """Returns the binary representation of a table.
+
+ If the table is currently loaded and in memory, the data is compiled to
+ binary and returned; if it is not currently loaded, the binary data is
+ read from the font file and returned.
"""
tag = Tag(tag)
if self.isLoaded(tag):
@@ -688,9 +702,18 @@ class TTFont(object):
or None, if no unicode cmap subtable is available.
By default it will search for the following (platformID, platEncID)
- pairs:
- (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0)
- This can be customized via the cmapPreferences argument.
+ pairs::
+
+ (3, 10),
+ (0, 6),
+ (0, 4),
+ (3, 1),
+ (0, 3),
+ (0, 2),
+ (0, 1),
+ (0, 0)
+
+ This can be customized via the ``cmapPreferences`` argument.
"""
return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
@@ -820,9 +843,9 @@ class GlyphOrder(object):
def fromXML(self, name, attrs, content, ttFont):
if not hasattr(self, "glyphOrder"):
self.glyphOrder = []
- ttFont.setGlyphOrder(self.glyphOrder)
if name == "GlyphID":
self.glyphOrder.append(attrs["name"])
+ ttFont.setGlyphOrder(self.glyphOrder)
def getTableModule(tag):
@@ -854,12 +877,13 @@ _customTableRegistry = {}
def registerCustomTableClass(tag, moduleName, className=None):
"""Register a custom packer/unpacker class for a table.
+
The 'moduleName' must be an importable module. If no 'className'
is given, it is derived from the tag, for example it will be
- table_C_U_S_T_ for a 'CUST' tag.
+ ``table_C_U_S_T_`` for a 'CUST' tag.
The registered table class should be a subclass of
- fontTools.ttLib.tables.DefaultTable.DefaultTable
+ :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable`
"""
if className is None:
className = "table_" + tagToIdentifier(tag)
@@ -930,10 +954,14 @@ def tagToIdentifier(tag):
letters get an underscore after the letter. Trailing spaces are
trimmed. Illegal characters are escaped as two hex bytes. If the
result starts with a number (as the result of a hex escape), an
- extra underscore is prepended. Examples:
- 'glyf' -> '_g_l_y_f'
- 'cvt ' -> '_c_v_t'
- 'OS/2' -> 'O_S_2f_2'
+ extra underscore is prepended. Examples::
+
+ >>> tagToIdentifier('glyf')
+ '_g_l_y_f'
+ >>> tagToIdentifier('cvt ')
+ '_c_v_t'
+ >>> tagToIdentifier('OS/2')
+ 'O_S_2f_2'
"""
import re
tag = Tag(tag)
diff --git a/Lib/fontTools/ttLib/woff2.py b/Lib/fontTools/ttLib/woff2.py
index cc58afa5..b66661ab 100644
--- a/Lib/fontTools/ttLib/woff2.py
+++ b/Lib/fontTools/ttLib/woff2.py
@@ -1,4 +1,3 @@
-from fontTools.misc.py23 import Tag, bytechr, byteord, bytesjoin
from io import BytesIO
import sys
import array
@@ -6,7 +5,7 @@ import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
-from fontTools.misc.textTools import pad
+from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass,
getSearchRange)
from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry,
diff --git a/Lib/fontTools/ttx.py b/Lib/fontTools/ttx.py
index 2eed0c5c..3f06c58b 100644
--- a/Lib/fontTools/ttx.py
+++ b/Lib/fontTools/ttx.py
@@ -1,95 +1,112 @@
"""\
usage: ttx [options] inputfile1 [... inputfileN]
- TTX -- From OpenType To XML And Back
-
- If an input file is a TrueType or OpenType font file, it will be
- decompiled to a TTX file (an XML-based text format).
- If an input file is a TTX file, it will be compiled to whatever
- format the data is in, a TrueType or OpenType/CFF font file.
-
- Output files are created so they are unique: an existing file is
- never overwritten.
-
- General options:
- -h Help: print this message.
- --version: show version and exit.
- -d <outputfolder> Specify a directory where the output files are
- to be created.
- -o <outputfile> Specify a file to write the output to. A special
- value of - would use the standard output.
- -f Overwrite existing output file(s), ie. don't append numbers.
- -v Verbose: more messages will be written to stdout about what
- is being done.
- -q Quiet: No messages will be written to stdout about what
- is being done.
- -a allow virtual glyphs ID's on compile or decompile.
-
- Dump options:
- -l List table info: instead of dumping to a TTX file, list some
- minimal info about each table.
- -t <table> Specify a table to dump. Multiple -t options
- are allowed. When no -t option is specified, all tables
- will be dumped.
- -x <table> Specify a table to exclude from the dump. Multiple
- -x options are allowed. -t and -x are mutually exclusive.
- -s Split tables: save the TTX data into separate TTX files per
- table and write one small TTX file that contains references
- to the individual table dumps. This file can be used as
- input to ttx, as long as the table files are in the
- same directory.
- -g Split glyf table: Save the glyf data into separate TTX files
- per glyph and write a small TTX for the glyf table which
- contains references to the individual TTGlyph elements.
- NOTE: specifying -g implies -s (no need for -s together with -g)
- -i Do NOT disassemble TT instructions: when this option is given,
- all TrueType programs (glyph programs, the font program and the
- pre-program) will be written to the TTX file as hex data
- instead of assembly. This saves some time and makes the TTX
- file smaller.
- -z <format> Specify a bitmap data export option for EBDT:
- {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
- {'raw', 'extfile'} Each option does one of the following:
- -z raw
- * export the bitmap data as a hex dump
- -z row
- * export each row as hex data
- -z bitwise
- * export each row as binary in an ASCII art style
- -z extfile
- * export the data as external files with XML references
- If no export format is specified 'raw' format is used.
- -e Don't ignore decompilation errors, but show a full traceback
- and abort.
- -y <number> Select font number for TrueType Collection (.ttc/.otc),
- starting from 0.
- --unicodedata <UnicodeData.txt> Use custom database file to write
- character names in the comments of the cmap TTX output.
- --newline <value> Control how line endings are written in the XML
- file. It can be 'LF', 'CR', or 'CRLF'. If not specified, the
- default platform-specific line endings are used.
-
- Compile options:
- -m Merge with TrueType-input-file: specify a TrueType or OpenType
- font file to be merged with the TTX file. This option is only
- valid when at most one TTX file is specified.
- -b Don't recalc glyph bounding boxes: use the values in the TTX
- file as-is.
- --recalc-timestamp Set font 'modified' timestamp to current time.
- By default, the modification time of the TTX file will be used.
- --no-recalc-timestamp Keep the original font 'modified' timestamp.
- --flavor <type> Specify flavor of output font file. May be 'woff'
- or 'woff2'. Note that WOFF2 requires the Brotli Python extension,
- available at https://github.com/google/brotli
- --with-zopfli Use Zopfli instead of Zlib to compress WOFF. The Python
- extension is available at https://pypi.python.org/pypi/zopfli
+TTX -- From OpenType To XML And Back
+
+If an input file is a TrueType or OpenType font file, it will be
+decompiled to a TTX file (an XML-based text format).
+If an input file is a TTX file, it will be compiled to whatever
+format the data is in, a TrueType or OpenType/CFF font file.
+
+Output files are created so they are unique: an existing file is
+never overwritten.
+
+General options
+===============
+
+-h Help: print this message.
+--version show version and exit.
+-d <outputfolder> Specify a directory where the output files are
+ to be created.
+-o <outputfile> Specify a file to write the output to. A special
+ value of - would use the standard output.
+-f Overwrite existing output file(s), i.e. don't append
+ numbers.
+-v Verbose: more messages will be written to stdout
+ about what is being done.
+-q Quiet: No messages will be written to stdout about
+ what is being done.
+-a allow virtual glyph IDs on compile or decompile.
+
+Dump options
+============
+
+-l List table info: instead of dumping to a TTX file, list
+ some minimal info about each table.
+-t <table> Specify a table to dump. Multiple -t options
+ are allowed. When no -t option is specified, all tables
+ will be dumped.
+-x <table> Specify a table to exclude from the dump. Multiple
+ -x options are allowed. -t and -x are mutually exclusive.
+-s Split tables: save the TTX data into separate TTX files per
+ table and write one small TTX file that contains references
+ to the individual table dumps. This file can be used as
+ input to ttx, as long as the table files are in the
+ same directory.
+-g Split glyf table: Save the glyf data into separate TTX files
+ per glyph and write a small TTX for the glyf table which
+ contains references to the individual TTGlyph elements.
+ NOTE: specifying -g implies -s (no need for -s together
+ with -g)
+-i Do NOT disassemble TT instructions: when this option is
+ given, all TrueType programs (glyph programs, the font
+ program and the pre-program) will be written to the TTX
+ file as hex data instead of assembly. This saves some time
+ and makes the TTX file smaller.
+-z <format> Specify a bitmap data export option for EBDT:
+ {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
+ {'raw', 'extfile'} Each option does one of the following:
+
+ -z raw
+ export the bitmap data as a hex dump
+ -z row
+ export each row as hex data
+ -z bitwise
+ export each row as binary in an ASCII art style
+ -z extfile
+ export the data as external files with XML references
+
+ If no export format is specified 'raw' format is used.
+-e Don't ignore decompilation errors, but show a full traceback
+ and abort.
+-y <number> Select font number for TrueType Collection (.ttc/.otc),
+ starting from 0.
+--unicodedata <UnicodeData.txt>
+ Use custom database file to write character names in the
+ comments of the cmap TTX output.
+--newline <value>
+ Control how line endings are written in the XML file. It
+ can be 'LF', 'CR', or 'CRLF'. If not specified, the
+ default platform-specific line endings are used.
+
+Compile options
+===============
+
+-m Merge with TrueType-input-file: specify a TrueType or
+ OpenType font file to be merged with the TTX file. This
+ option is only valid when at most one TTX file is specified.
+-b Don't recalc glyph bounding boxes: use the values in the
+ TTX file as-is.
+--recalc-timestamp
+ Set font 'modified' timestamp to current time.
+ By default, the modification time of the TTX file will be
+ used.
+--no-recalc-timestamp
+ Keep the original font 'modified' timestamp.
+--flavor <type>
+ Specify flavor of output font file. May be 'woff' or 'woff2'.
+ Note that WOFF2 requires the Brotli Python extension,
+ available at https://github.com/google/brotli
+--with-zopfli
+ Use Zopfli instead of Zlib to compress WOFF. The Python
+ extension is available at https://pypi.python.org/pypi/zopfli
"""
-from fontTools.misc.py23 import Tag, tostr
from fontTools.ttLib import TTFont, TTLibError
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.unicode import setUnicodeData
+from fontTools.misc.textTools import Tag, tostr
from fontTools.misc.timeTools import timestampSinceEpoch
from fontTools.misc.loggingTools import Timer
from fontTools.misc.cliTools import makeOutputFileName
@@ -118,11 +135,10 @@ class Options(object):
disassembleInstructions = True
mergeFile = None
recalcBBoxes = True
- allowVID = False
ignoreDecompileErrors = True
bitmapGlyphDataFormat = 'raw'
unicodedata = None
- newlinestr = None
+ newlinestr = "\n"
recalcTimestamp = None
flavor = None
useZopfli = False
@@ -184,8 +200,6 @@ class Options(object):
self.mergeFile = value
elif option == "-b":
self.recalcBBoxes = False
- elif option == "-a":
- self.allowVID = True
elif option == "-e":
self.ignoreDecompileErrors = False
elif option == "--unicodedata":
@@ -258,7 +272,7 @@ def ttDump(input, output, options):
log.info('Dumping "%s" to "%s"...', input, output)
if options.unicodedata:
setUnicodeData(options.unicodedata)
- ttf = TTFont(input, 0, allowVID=options.allowVID,
+ ttf = TTFont(input, 0,
ignoreDecompileErrors=options.ignoreDecompileErrors,
fontNumber=options.fontNumber)
ttf.saveXML(output,
@@ -280,8 +294,7 @@ def ttCompile(input, output, options):
sfnt.USE_ZOPFLI = True
ttf = TTFont(options.mergeFile, flavor=options.flavor,
recalcBBoxes=options.recalcBBoxes,
- recalcTimestamp=options.recalcTimestamp,
- allowVID=options.allowVID)
+ recalcTimestamp=options.recalcTimestamp)
ttf.importXML(input)
if options.recalcTimestamp is None and 'head' in ttf:
@@ -374,15 +387,6 @@ def process(jobs, options):
action(input, output, options)
-def waitForKeyPress():
- """Force the DOS Prompt window to stay open so the user gets
- a chance to see what's wrong."""
- import msvcrt
- print('(Hit any key to exit)', file=sys.stderr)
- while not msvcrt.kbhit():
- pass
-
-
def main(args=None):
"""Convert OpenType fonts to XML and back"""
from fontTools import configLogger
@@ -403,16 +407,12 @@ def main(args=None):
log.error("(Cancelled.)")
sys.exit(1)
except SystemExit:
- if sys.platform == "win32":
- waitForKeyPress()
raise
except TTLibError as e:
log.error(e)
sys.exit(1)
except:
log.exception('Unhandled exception has occurred')
- if sys.platform == "win32":
- waitForKeyPress()
sys.exit(1)
diff --git a/Lib/fontTools/ufoLib/__init__.py b/Lib/fontTools/ufoLib/__init__.py
index e846d085..bd04dd7a 100755
--- a/Lib/fontTools/ufoLib/__init__.py
+++ b/Lib/fontTools/ufoLib/__init__.py
@@ -491,7 +491,7 @@ class UFOReader(_UFOBaseIO):
"""
Get maps defining the renaming that was done during any
needed kerning group conversion. This method returns a
- dictionary of this form:
+ dictionary of this form::
{
"side1" : {"old group name" : "new group name"},
@@ -1173,7 +1173,7 @@ class UFOWriter(UFOReader):
when writing groups and kerning in UFO 1 and UFO 2.
This will effectively undo the conversion done when
UFOReader reads this data. The dictionary should have
- this form:
+ this form::
{
"side1" : {"group name to use when writing" : "group name in data"},
diff --git a/Lib/fontTools/ufoLib/filenames.py b/Lib/fontTools/ufoLib/filenames.py
index 2815469f..baf22076 100644
--- a/Lib/fontTools/ufoLib/filenames.py
+++ b/Lib/fontTools/ufoLib/filenames.py
@@ -3,11 +3,88 @@ User name to file name conversion.
This was taken from the UFO 3 spec.
"""
-illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")
-illegalCharacters += [chr(i) for i in range(1, 32)]
-illegalCharacters += [chr(0x7F)]
-reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")
-reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ")
+# Restrictions are taken mostly from
+# https://docs.microsoft.com/en-gb/windows/win32/fileio/naming-a-file#naming-conventions.
+#
+# 1. Integer value zero, sometimes referred to as the ASCII NUL character.
+# 2. Characters whose integer representations are in the range 1 to 31,
+# inclusive.
+# 3. Various characters that (mostly) Windows and POSIX-y filesystems don't
+# allow, plus "(" and ")", as per the specification.
+illegalCharacters = {
+ "\x00",
+ "\x01",
+ "\x02",
+ "\x03",
+ "\x04",
+ "\x05",
+ "\x06",
+ "\x07",
+ "\x08",
+ "\t",
+ "\n",
+ "\x0b",
+ "\x0c",
+ "\r",
+ "\x0e",
+ "\x0f",
+ "\x10",
+ "\x11",
+ "\x12",
+ "\x13",
+ "\x14",
+ "\x15",
+ "\x16",
+ "\x17",
+ "\x18",
+ "\x19",
+ "\x1a",
+ "\x1b",
+ "\x1c",
+ "\x1d",
+ "\x1e",
+ "\x1f",
+ '"',
+ "*",
+ "+",
+ "/",
+ ":",
+ "<",
+ ">",
+ "?",
+ "[",
+ "\\",
+ "]",
+ "(",
+ ")",
+ "|",
+ "\x7f",
+}
+reservedFileNames = {
+ "aux",
+ "clock$",
+ "com1",
+ "com2",
+ "com3",
+ "com4",
+ "com5",
+ "com6",
+ "com7",
+ "com8",
+ "com9",
+ "con",
+ "lpt1",
+ "lpt2",
+ "lpt3",
+ "lpt4",
+ "lpt5",
+ "lpt6",
+ "lpt7",
+ "lpt8",
+ "lpt9",
+ "nul",
+ "prn",
+}
maxFileNameLength = 255
@@ -15,10 +92,9 @@ class NameTranslationError(Exception):
pass
-def userNameToFileName(userName: str, existing=[], prefix="", suffix=""):
+def userNameToFileName(userName: str, existing=(), prefix="", suffix=""):
"""
- existing should be a case-insensitive list
- of all existing file names.
+ `existing` should be a set-like object.
>>> userNameToFileName("a") == "a"
True
diff --git a/Lib/fontTools/ufoLib/glifLib.py b/Lib/fontTools/ufoLib/glifLib.py
index 3003110e..44622a14 100755
--- a/Lib/fontTools/ufoLib/glifLib.py
+++ b/Lib/fontTools/ufoLib/glifLib.py
@@ -10,6 +10,8 @@ in a folder. It offers two ways to read glyph data, and one way to write
glyph data. See the class doc string for details.
"""
+from __future__ import annotations
+
import logging
import enum
from warnings import warn
@@ -19,7 +21,7 @@ import fs.base
import fs.errors
import fs.osfs
import fs.path
-from fontTools.misc.py23 import tobytes
+from fontTools.misc.textTools import tobytes
from fontTools.misc import plistlib
from fontTools.pens.pointPen import AbstractPointPen, PointToSegmentPen
from fontTools.ufoLib.errors import GlifLibError
@@ -205,7 +207,7 @@ class GlyphSet(_UFOBaseIO):
self.glyphNameToFileName = glyphNameToFileNameFunc
self._validateRead = validateRead
self._validateWrite = validateWrite
- self._existingFileNames = None
+ self._existingFileNames: set[str] | None = None
self._reverseContents = None
self.rebuildContents()
@@ -358,23 +360,33 @@ class GlyphSet(_UFOBaseIO):
'glyphObject' argument can be any kind of object (even None);
the readGlyph() method will attempt to set the following
attributes on it:
- "width" the advance width of the glyph
- "height" the advance height of the glyph
- "unicodes" a list of unicode values for this glyph
- "note" a string
- "lib" a dictionary containing custom data
- "image" a dictionary containing image data
- "guidelines" a list of guideline data dictionaries
- "anchors" a list of anchor data dictionaries
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
All attributes are optional, in two ways:
- 1) An attribute *won't* be set if the .glif file doesn't
- contain data for it. 'glyphObject' will have to deal
- with default values itself.
- 2) If setting the attribute fails with an AttributeError
- (for example if the 'glyphObject' attribute is read-
- only), readGlyph() will not propagate that exception,
- but ignore that attribute.
+
+ 1) An attribute *won't* be set if the .glif file doesn't
+ contain data for it. 'glyphObject' will have to deal
+ with default values itself.
+ 2) If setting the attribute fails with an AttributeError
+ (for example if the 'glyphObject' attribute is read-
+ only), readGlyph() will not propagate that exception,
+ but ignore that attribute.
To retrieve outline information, you need to pass an object
conforming to the PointPen protocol as the 'pointPen' argument.
@@ -399,14 +411,23 @@ class GlyphSet(_UFOBaseIO):
'glyphObject' argument can be any kind of object (even None);
the writeGlyph() method will attempt to get the following
attributes from it:
- "width" the advance with of the glyph
- "height" the advance height of the glyph
- "unicodes" a list of unicode values for this glyph
- "note" a string
- "lib" a dictionary containing custom data
- "image" a dictionary containing image data
- "guidelines" a list of guideline data dictionaries
- "anchors" a list of anchor data dictionaries
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
All attributes are optional: if 'glyphObject' doesn't
have the attribute, it will simply be skipped.
@@ -455,12 +476,12 @@ class GlyphSet(_UFOBaseIO):
fileName = self.contents.get(glyphName)
if fileName is None:
if self._existingFileNames is None:
- self._existingFileNames = {}
- for fileName in self.contents.values():
- self._existingFileNames[fileName] = fileName.lower()
- fileName = self.glyphNameToFileName(glyphName, self._existingFileNames.values())
+ self._existingFileNames = {
+ fileName.lower() for fileName in self.contents.values()
+ }
+ fileName = self.glyphNameToFileName(glyphName, self._existingFileNames)
self.contents[glyphName] = fileName
- self._existingFileNames[fileName] = fileName.lower()
+ self._existingFileNames.add(fileName.lower())
if self._reverseContents is not None:
self._reverseContents[fileName.lower()] = glyphName
data = _writeGlyphToBytes(
@@ -485,9 +506,9 @@ class GlyphSet(_UFOBaseIO):
fileName = self.contents[glyphName]
self.fs.remove(fileName)
if self._existingFileNames is not None:
- del self._existingFileNames[fileName]
+ self._existingFileNames.remove(fileName.lower())
if self._reverseContents is not None:
- del self._reverseContents[self.contents[glyphName].lower()]
+ del self._reverseContents[fileName.lower()]
del self.contents[glyphName]
# dict-like support
@@ -573,9 +594,12 @@ class GlyphSet(_UFOBaseIO):
def glyphNameToFileName(glyphName, existingFileNames):
"""
Wrapper around the userNameToFileName function in filenames.py
+
+ Note that existingFileNames should be a set for large glyphsets
+ or performance will suffer.
"""
if existingFileNames is None:
- existingFileNames = []
+ existingFileNames = set()
return userNameToFileName(glyphName, existing=existingFileNames, suffix=".glif")
# -----------------------
@@ -595,23 +619,33 @@ def readGlyphFromString(
The 'glyphObject' argument can be any kind of object (even None);
the readGlyphFromString() method will attempt to set the following
attributes on it:
- "width" the advance with of the glyph
- "height" the advance height of the glyph
- "unicodes" a list of unicode values for this glyph
- "note" a string
- "lib" a dictionary containing custom data
- "image" a dictionary containing image data
- "guidelines" a list of guideline data dictionaries
- "anchors" a list of anchor data dictionaries
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
All attributes are optional, in two ways:
- 1) An attribute *won't* be set if the .glif file doesn't
- contain data for it. 'glyphObject' will have to deal
- with default values itself.
- 2) If setting the attribute fails with an AttributeError
- (for example if the 'glyphObject' attribute is read-
- only), readGlyphFromString() will not propagate that
- exception, but ignore that attribute.
+
+ 1) An attribute *won't* be set if the .glif file doesn't
+ contain data for it. 'glyphObject' will have to deal
+ with default values itself.
+ 2) If setting the attribute fails with an AttributeError
+ (for example if the 'glyphObject' attribute is read-
+ only), readGlyphFromString() will not propagate that
+ exception, but ignore that attribute.
To retrieve outline information, you need to pass an object
conforming to the PointPen protocol as the 'pointPen' argument.
@@ -728,14 +762,23 @@ def writeGlyphToString(
The 'glyphObject' argument can be any kind of object (even None);
the writeGlyphToString() method will attempt to get the following
attributes from it:
- "width" the advance width of the glyph
- "height" the advance height of the glyph
- "unicodes" a list of unicode values for this glyph
- "note" a string
- "lib" a dictionary containing custom data
- "image" a dictionary containing image data
- "guidelines" a list of guideline data dictionaries
- "anchors" a list of anchor data dictionaries
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
All attributes are optional: if 'glyphObject' doesn't
have the attribute, it will simply be skipped.
diff --git a/Lib/fontTools/ufoLib/plistlib.py b/Lib/fontTools/ufoLib/plistlib.py
index 76381687..1f52f20a 100644
--- a/Lib/fontTools/ufoLib/plistlib.py
+++ b/Lib/fontTools/ufoLib/plistlib.py
@@ -3,7 +3,7 @@ for the old ufoLib.plistlib module, which was moved to fontTools.misc.plistlib.
Please use the latter instead.
"""
from fontTools.misc.plistlib import dump, dumps, load, loads
-from fontTools.misc.py23 import tobytes
+from fontTools.misc.textTools import tobytes
# The following functions were part of the old py2-like ufoLib.plistlib API.
# They are kept only for backward compatiblity.
diff --git a/Lib/fontTools/unicodedata/Blocks.py b/Lib/fontTools/unicodedata/Blocks.py
index 0755074b..2b30be67 100644
--- a/Lib/fontTools/unicodedata/Blocks.py
+++ b/Lib/fontTools/unicodedata/Blocks.py
@@ -4,9 +4,9 @@
# Source: https://unicode.org/Public/UNIDATA/Blocks.txt
# License: http://unicode.org/copyright.html#License
#
-# Blocks-13.0.0.txt
-# Date: 2019-07-10, 19:06:00 GMT [KW]
-# © 2019 Unicode®, Inc.
+# Blocks-14.0.0.txt
+# Date: 2021-01-22, 23:29:00 GMT [KW]
+# © 2021 Unicode®, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
@@ -37,7 +37,7 @@ RANGES = [
0x0800, # .. 0x083F ; Samaritan
0x0840, # .. 0x085F ; Mandaic
0x0860, # .. 0x086F ; Syriac Supplement
- 0x0870, # .. 0x089F ; No_Block
+ 0x0870, # .. 0x089F ; Arabic Extended-B
0x08A0, # .. 0x08FF ; Arabic Extended-A
0x0900, # .. 0x097F ; Devanagari
0x0980, # .. 0x09FF ; Bengali
@@ -204,9 +204,11 @@ RANGES = [
0x104B0, # .. 0x104FF ; Osage
0x10500, # .. 0x1052F ; Elbasan
0x10530, # .. 0x1056F ; Caucasian Albanian
- 0x10570, # .. 0x105FF ; No_Block
+ 0x10570, # .. 0x105BF ; Vithkuqi
+ 0x105C0, # .. 0x105FF ; No_Block
0x10600, # .. 0x1077F ; Linear A
- 0x10780, # .. 0x107FF ; No_Block
+ 0x10780, # .. 0x107BF ; Latin Extended-F
+ 0x107C0, # .. 0x107FF ; No_Block
0x10800, # .. 0x1083F ; Cypriot Syllabary
0x10840, # .. 0x1085F ; Imperial Aramaic
0x10860, # .. 0x1087F ; Palmyrene
@@ -238,7 +240,7 @@ RANGES = [
0x10EC0, # .. 0x10EFF ; No_Block
0x10F00, # .. 0x10F2F ; Old Sogdian
0x10F30, # .. 0x10F6F ; Sogdian
- 0x10F70, # .. 0x10FAF ; No_Block
+ 0x10F70, # .. 0x10FAF ; Old Uyghur
0x10FB0, # .. 0x10FDF ; Chorasmian
0x10FE0, # .. 0x10FFF ; Elymaic
0x11000, # .. 0x1107F ; Brahmi
@@ -262,8 +264,8 @@ RANGES = [
0x11660, # .. 0x1167F ; Mongolian Supplement
0x11680, # .. 0x116CF ; Takri
0x116D0, # .. 0x116FF ; No_Block
- 0x11700, # .. 0x1173F ; Ahom
- 0x11740, # .. 0x117FF ; No_Block
+ 0x11700, # .. 0x1174F ; Ahom
+ 0x11750, # .. 0x117FF ; No_Block
0x11800, # .. 0x1184F ; Dogra
0x11850, # .. 0x1189F ; No_Block
0x118A0, # .. 0x118FF ; Warang Citi
@@ -272,7 +274,7 @@ RANGES = [
0x119A0, # .. 0x119FF ; Nandinagari
0x11A00, # .. 0x11A4F ; Zanabazar Square
0x11A50, # .. 0x11AAF ; Soyombo
- 0x11AB0, # .. 0x11ABF ; No_Block
+ 0x11AB0, # .. 0x11ABF ; Unified Canadian Aboriginal Syllabics Extended-A
0x11AC0, # .. 0x11AFF ; Pau Cin Hau
0x11B00, # .. 0x11BFF ; No_Block
0x11C00, # .. 0x11C6F ; Bhaiksuki
@@ -288,7 +290,8 @@ RANGES = [
0x12000, # .. 0x123FF ; Cuneiform
0x12400, # .. 0x1247F ; Cuneiform Numbers and Punctuation
0x12480, # .. 0x1254F ; Early Dynastic Cuneiform
- 0x12550, # .. 0x12FFF ; No_Block
+ 0x12550, # .. 0x12F8F ; No_Block
+ 0x12F90, # .. 0x12FFF ; Cypro-Minoan
0x13000, # .. 0x1342F ; Egyptian Hieroglyphs
0x13430, # .. 0x1343F ; Egyptian Hieroglyph Format Controls
0x13440, # .. 0x143FF ; No_Block
@@ -296,7 +299,7 @@ RANGES = [
0x14680, # .. 0x167FF ; No_Block
0x16800, # .. 0x16A3F ; Bamum Supplement
0x16A40, # .. 0x16A6F ; Mro
- 0x16A70, # .. 0x16ACF ; No_Block
+ 0x16A70, # .. 0x16ACF ; Tangsa
0x16AD0, # .. 0x16AFF ; Bassa Vah
0x16B00, # .. 0x16B8F ; Pahawh Hmong
0x16B90, # .. 0x16E3F ; No_Block
@@ -308,8 +311,9 @@ RANGES = [
0x17000, # .. 0x187FF ; Tangut
0x18800, # .. 0x18AFF ; Tangut Components
0x18B00, # .. 0x18CFF ; Khitan Small Script
- 0x18D00, # .. 0x18D8F ; Tangut Supplement
- 0x18D90, # .. 0x1AFFF ; No_Block
+ 0x18D00, # .. 0x18D7F ; Tangut Supplement
+ 0x18D80, # .. 0x1AFEF ; No_Block
+ 0x1AFF0, # .. 0x1AFFF ; Kana Extended-B
0x1B000, # .. 0x1B0FF ; Kana Supplement
0x1B100, # .. 0x1B12F ; Kana Extended-A
0x1B130, # .. 0x1B16F ; Small Kana Extension
@@ -317,7 +321,9 @@ RANGES = [
0x1B300, # .. 0x1BBFF ; No_Block
0x1BC00, # .. 0x1BC9F ; Duployan
0x1BCA0, # .. 0x1BCAF ; Shorthand Format Controls
- 0x1BCB0, # .. 0x1CFFF ; No_Block
+ 0x1BCB0, # .. 0x1CEFF ; No_Block
+ 0x1CF00, # .. 0x1CFCF ; Znamenny Musical Notation
+ 0x1CFD0, # .. 0x1CFFF ; No_Block
0x1D000, # .. 0x1D0FF ; Byzantine Musical Symbols
0x1D100, # .. 0x1D1FF ; Musical Symbols
0x1D200, # .. 0x1D24F ; Ancient Greek Musical Notation
@@ -328,13 +334,16 @@ RANGES = [
0x1D380, # .. 0x1D3FF ; No_Block
0x1D400, # .. 0x1D7FF ; Mathematical Alphanumeric Symbols
0x1D800, # .. 0x1DAAF ; Sutton SignWriting
- 0x1DAB0, # .. 0x1DFFF ; No_Block
+ 0x1DAB0, # .. 0x1DEFF ; No_Block
+ 0x1DF00, # .. 0x1DFFF ; Latin Extended-G
0x1E000, # .. 0x1E02F ; Glagolitic Supplement
0x1E030, # .. 0x1E0FF ; No_Block
0x1E100, # .. 0x1E14F ; Nyiakeng Puachue Hmong
- 0x1E150, # .. 0x1E2BF ; No_Block
+ 0x1E150, # .. 0x1E28F ; No_Block
+ 0x1E290, # .. 0x1E2BF ; Toto
0x1E2C0, # .. 0x1E2FF ; Wancho
- 0x1E300, # .. 0x1E7FF ; No_Block
+ 0x1E300, # .. 0x1E7DF ; No_Block
+ 0x1E7E0, # .. 0x1E7FF ; Ethiopic Extended-B
0x1E800, # .. 0x1E8DF ; Mende Kikakui
0x1E8E0, # .. 0x1E8FF ; No_Block
0x1E900, # .. 0x1E95F ; Adlam
@@ -382,366 +391,375 @@ RANGES = [
]
VALUES = [
- 'Basic Latin', # 0000..007F
- 'Latin-1 Supplement', # 0080..00FF
- 'Latin Extended-A', # 0100..017F
- 'Latin Extended-B', # 0180..024F
- 'IPA Extensions', # 0250..02AF
- 'Spacing Modifier Letters', # 02B0..02FF
- 'Combining Diacritical Marks', # 0300..036F
- 'Greek and Coptic', # 0370..03FF
- 'Cyrillic', # 0400..04FF
- 'Cyrillic Supplement', # 0500..052F
- 'Armenian', # 0530..058F
- 'Hebrew', # 0590..05FF
- 'Arabic', # 0600..06FF
- 'Syriac', # 0700..074F
- 'Arabic Supplement', # 0750..077F
- 'Thaana', # 0780..07BF
- 'NKo', # 07C0..07FF
- 'Samaritan', # 0800..083F
- 'Mandaic', # 0840..085F
- 'Syriac Supplement', # 0860..086F
- 'No_Block', # 0870..089F
- 'Arabic Extended-A', # 08A0..08FF
- 'Devanagari', # 0900..097F
- 'Bengali', # 0980..09FF
- 'Gurmukhi', # 0A00..0A7F
- 'Gujarati', # 0A80..0AFF
- 'Oriya', # 0B00..0B7F
- 'Tamil', # 0B80..0BFF
- 'Telugu', # 0C00..0C7F
- 'Kannada', # 0C80..0CFF
- 'Malayalam', # 0D00..0D7F
- 'Sinhala', # 0D80..0DFF
- 'Thai', # 0E00..0E7F
- 'Lao', # 0E80..0EFF
- 'Tibetan', # 0F00..0FFF
- 'Myanmar', # 1000..109F
- 'Georgian', # 10A0..10FF
- 'Hangul Jamo', # 1100..11FF
- 'Ethiopic', # 1200..137F
- 'Ethiopic Supplement', # 1380..139F
- 'Cherokee', # 13A0..13FF
- 'Unified Canadian Aboriginal Syllabics', # 1400..167F
- 'Ogham', # 1680..169F
- 'Runic', # 16A0..16FF
- 'Tagalog', # 1700..171F
- 'Hanunoo', # 1720..173F
- 'Buhid', # 1740..175F
- 'Tagbanwa', # 1760..177F
- 'Khmer', # 1780..17FF
- 'Mongolian', # 1800..18AF
- 'Unified Canadian Aboriginal Syllabics Extended', # 18B0..18FF
- 'Limbu', # 1900..194F
- 'Tai Le', # 1950..197F
- 'New Tai Lue', # 1980..19DF
- 'Khmer Symbols', # 19E0..19FF
- 'Buginese', # 1A00..1A1F
- 'Tai Tham', # 1A20..1AAF
- 'Combining Diacritical Marks Extended', # 1AB0..1AFF
- 'Balinese', # 1B00..1B7F
- 'Sundanese', # 1B80..1BBF
- 'Batak', # 1BC0..1BFF
- 'Lepcha', # 1C00..1C4F
- 'Ol Chiki', # 1C50..1C7F
- 'Cyrillic Extended-C', # 1C80..1C8F
- 'Georgian Extended', # 1C90..1CBF
- 'Sundanese Supplement', # 1CC0..1CCF
- 'Vedic Extensions', # 1CD0..1CFF
- 'Phonetic Extensions', # 1D00..1D7F
- 'Phonetic Extensions Supplement', # 1D80..1DBF
- 'Combining Diacritical Marks Supplement', # 1DC0..1DFF
- 'Latin Extended Additional', # 1E00..1EFF
- 'Greek Extended', # 1F00..1FFF
- 'General Punctuation', # 2000..206F
- 'Superscripts and Subscripts', # 2070..209F
- 'Currency Symbols', # 20A0..20CF
- 'Combining Diacritical Marks for Symbols', # 20D0..20FF
- 'Letterlike Symbols', # 2100..214F
- 'Number Forms', # 2150..218F
- 'Arrows', # 2190..21FF
- 'Mathematical Operators', # 2200..22FF
- 'Miscellaneous Technical', # 2300..23FF
- 'Control Pictures', # 2400..243F
- 'Optical Character Recognition', # 2440..245F
- 'Enclosed Alphanumerics', # 2460..24FF
- 'Box Drawing', # 2500..257F
- 'Block Elements', # 2580..259F
- 'Geometric Shapes', # 25A0..25FF
- 'Miscellaneous Symbols', # 2600..26FF
- 'Dingbats', # 2700..27BF
- 'Miscellaneous Mathematical Symbols-A', # 27C0..27EF
- 'Supplemental Arrows-A', # 27F0..27FF
- 'Braille Patterns', # 2800..28FF
- 'Supplemental Arrows-B', # 2900..297F
- 'Miscellaneous Mathematical Symbols-B', # 2980..29FF
- 'Supplemental Mathematical Operators', # 2A00..2AFF
- 'Miscellaneous Symbols and Arrows', # 2B00..2BFF
- 'Glagolitic', # 2C00..2C5F
- 'Latin Extended-C', # 2C60..2C7F
- 'Coptic', # 2C80..2CFF
- 'Georgian Supplement', # 2D00..2D2F
- 'Tifinagh', # 2D30..2D7F
- 'Ethiopic Extended', # 2D80..2DDF
- 'Cyrillic Extended-A', # 2DE0..2DFF
- 'Supplemental Punctuation', # 2E00..2E7F
- 'CJK Radicals Supplement', # 2E80..2EFF
- 'Kangxi Radicals', # 2F00..2FDF
- 'No_Block', # 2FE0..2FEF
- 'Ideographic Description Characters', # 2FF0..2FFF
- 'CJK Symbols and Punctuation', # 3000..303F
- 'Hiragana', # 3040..309F
- 'Katakana', # 30A0..30FF
- 'Bopomofo', # 3100..312F
- 'Hangul Compatibility Jamo', # 3130..318F
- 'Kanbun', # 3190..319F
- 'Bopomofo Extended', # 31A0..31BF
- 'CJK Strokes', # 31C0..31EF
- 'Katakana Phonetic Extensions', # 31F0..31FF
- 'Enclosed CJK Letters and Months', # 3200..32FF
- 'CJK Compatibility', # 3300..33FF
- 'CJK Unified Ideographs Extension A', # 3400..4DBF
- 'Yijing Hexagram Symbols', # 4DC0..4DFF
- 'CJK Unified Ideographs', # 4E00..9FFF
- 'Yi Syllables', # A000..A48F
- 'Yi Radicals', # A490..A4CF
- 'Lisu', # A4D0..A4FF
- 'Vai', # A500..A63F
- 'Cyrillic Extended-B', # A640..A69F
- 'Bamum', # A6A0..A6FF
- 'Modifier Tone Letters', # A700..A71F
- 'Latin Extended-D', # A720..A7FF
- 'Syloti Nagri', # A800..A82F
- 'Common Indic Number Forms', # A830..A83F
- 'Phags-pa', # A840..A87F
- 'Saurashtra', # A880..A8DF
- 'Devanagari Extended', # A8E0..A8FF
- 'Kayah Li', # A900..A92F
- 'Rejang', # A930..A95F
- 'Hangul Jamo Extended-A', # A960..A97F
- 'Javanese', # A980..A9DF
- 'Myanmar Extended-B', # A9E0..A9FF
- 'Cham', # AA00..AA5F
- 'Myanmar Extended-A', # AA60..AA7F
- 'Tai Viet', # AA80..AADF
- 'Meetei Mayek Extensions', # AAE0..AAFF
- 'Ethiopic Extended-A', # AB00..AB2F
- 'Latin Extended-E', # AB30..AB6F
- 'Cherokee Supplement', # AB70..ABBF
- 'Meetei Mayek', # ABC0..ABFF
- 'Hangul Syllables', # AC00..D7AF
- 'Hangul Jamo Extended-B', # D7B0..D7FF
- 'High Surrogates', # D800..DB7F
- 'High Private Use Surrogates', # DB80..DBFF
- 'Low Surrogates', # DC00..DFFF
- 'Private Use Area', # E000..F8FF
- 'CJK Compatibility Ideographs', # F900..FAFF
- 'Alphabetic Presentation Forms', # FB00..FB4F
- 'Arabic Presentation Forms-A', # FB50..FDFF
- 'Variation Selectors', # FE00..FE0F
- 'Vertical Forms', # FE10..FE1F
- 'Combining Half Marks', # FE20..FE2F
- 'CJK Compatibility Forms', # FE30..FE4F
- 'Small Form Variants', # FE50..FE6F
- 'Arabic Presentation Forms-B', # FE70..FEFF
- 'Halfwidth and Fullwidth Forms', # FF00..FFEF
- 'Specials', # FFF0..FFFF
- 'Linear B Syllabary', # 10000..1007F
- 'Linear B Ideograms', # 10080..100FF
- 'Aegean Numbers', # 10100..1013F
- 'Ancient Greek Numbers', # 10140..1018F
- 'Ancient Symbols', # 10190..101CF
- 'Phaistos Disc', # 101D0..101FF
- 'No_Block', # 10200..1027F
- 'Lycian', # 10280..1029F
- 'Carian', # 102A0..102DF
- 'Coptic Epact Numbers', # 102E0..102FF
- 'Old Italic', # 10300..1032F
- 'Gothic', # 10330..1034F
- 'Old Permic', # 10350..1037F
- 'Ugaritic', # 10380..1039F
- 'Old Persian', # 103A0..103DF
- 'No_Block', # 103E0..103FF
- 'Deseret', # 10400..1044F
- 'Shavian', # 10450..1047F
- 'Osmanya', # 10480..104AF
- 'Osage', # 104B0..104FF
- 'Elbasan', # 10500..1052F
- 'Caucasian Albanian', # 10530..1056F
- 'No_Block', # 10570..105FF
- 'Linear A', # 10600..1077F
- 'No_Block', # 10780..107FF
- 'Cypriot Syllabary', # 10800..1083F
- 'Imperial Aramaic', # 10840..1085F
- 'Palmyrene', # 10860..1087F
- 'Nabataean', # 10880..108AF
- 'No_Block', # 108B0..108DF
- 'Hatran', # 108E0..108FF
- 'Phoenician', # 10900..1091F
- 'Lydian', # 10920..1093F
- 'No_Block', # 10940..1097F
- 'Meroitic Hieroglyphs', # 10980..1099F
- 'Meroitic Cursive', # 109A0..109FF
- 'Kharoshthi', # 10A00..10A5F
- 'Old South Arabian', # 10A60..10A7F
- 'Old North Arabian', # 10A80..10A9F
- 'No_Block', # 10AA0..10ABF
- 'Manichaean', # 10AC0..10AFF
- 'Avestan', # 10B00..10B3F
- 'Inscriptional Parthian', # 10B40..10B5F
- 'Inscriptional Pahlavi', # 10B60..10B7F
- 'Psalter Pahlavi', # 10B80..10BAF
- 'No_Block', # 10BB0..10BFF
- 'Old Turkic', # 10C00..10C4F
- 'No_Block', # 10C50..10C7F
- 'Old Hungarian', # 10C80..10CFF
- 'Hanifi Rohingya', # 10D00..10D3F
- 'No_Block', # 10D40..10E5F
- 'Rumi Numeral Symbols', # 10E60..10E7F
- 'Yezidi', # 10E80..10EBF
- 'No_Block', # 10EC0..10EFF
- 'Old Sogdian', # 10F00..10F2F
- 'Sogdian', # 10F30..10F6F
- 'No_Block', # 10F70..10FAF
- 'Chorasmian', # 10FB0..10FDF
- 'Elymaic', # 10FE0..10FFF
- 'Brahmi', # 11000..1107F
- 'Kaithi', # 11080..110CF
- 'Sora Sompeng', # 110D0..110FF
- 'Chakma', # 11100..1114F
- 'Mahajani', # 11150..1117F
- 'Sharada', # 11180..111DF
- 'Sinhala Archaic Numbers', # 111E0..111FF
- 'Khojki', # 11200..1124F
- 'No_Block', # 11250..1127F
- 'Multani', # 11280..112AF
- 'Khudawadi', # 112B0..112FF
- 'Grantha', # 11300..1137F
- 'No_Block', # 11380..113FF
- 'Newa', # 11400..1147F
- 'Tirhuta', # 11480..114DF
- 'No_Block', # 114E0..1157F
- 'Siddham', # 11580..115FF
- 'Modi', # 11600..1165F
- 'Mongolian Supplement', # 11660..1167F
- 'Takri', # 11680..116CF
- 'No_Block', # 116D0..116FF
- 'Ahom', # 11700..1173F
- 'No_Block', # 11740..117FF
- 'Dogra', # 11800..1184F
- 'No_Block', # 11850..1189F
- 'Warang Citi', # 118A0..118FF
- 'Dives Akuru', # 11900..1195F
- 'No_Block', # 11960..1199F
- 'Nandinagari', # 119A0..119FF
- 'Zanabazar Square', # 11A00..11A4F
- 'Soyombo', # 11A50..11AAF
- 'No_Block', # 11AB0..11ABF
- 'Pau Cin Hau', # 11AC0..11AFF
- 'No_Block', # 11B00..11BFF
- 'Bhaiksuki', # 11C00..11C6F
- 'Marchen', # 11C70..11CBF
- 'No_Block', # 11CC0..11CFF
- 'Masaram Gondi', # 11D00..11D5F
- 'Gunjala Gondi', # 11D60..11DAF
- 'No_Block', # 11DB0..11EDF
- 'Makasar', # 11EE0..11EFF
- 'No_Block', # 11F00..11FAF
- 'Lisu Supplement', # 11FB0..11FBF
- 'Tamil Supplement', # 11FC0..11FFF
- 'Cuneiform', # 12000..123FF
- 'Cuneiform Numbers and Punctuation', # 12400..1247F
- 'Early Dynastic Cuneiform', # 12480..1254F
- 'No_Block', # 12550..12FFF
- 'Egyptian Hieroglyphs', # 13000..1342F
- 'Egyptian Hieroglyph Format Controls', # 13430..1343F
- 'No_Block', # 13440..143FF
- 'Anatolian Hieroglyphs', # 14400..1467F
- 'No_Block', # 14680..167FF
- 'Bamum Supplement', # 16800..16A3F
- 'Mro', # 16A40..16A6F
- 'No_Block', # 16A70..16ACF
- 'Bassa Vah', # 16AD0..16AFF
- 'Pahawh Hmong', # 16B00..16B8F
- 'No_Block', # 16B90..16E3F
- 'Medefaidrin', # 16E40..16E9F
- 'No_Block', # 16EA0..16EFF
- 'Miao', # 16F00..16F9F
- 'No_Block', # 16FA0..16FDF
- 'Ideographic Symbols and Punctuation', # 16FE0..16FFF
- 'Tangut', # 17000..187FF
- 'Tangut Components', # 18800..18AFF
- 'Khitan Small Script', # 18B00..18CFF
- 'Tangut Supplement', # 18D00..18D8F
- 'No_Block', # 18D90..1AFFF
- 'Kana Supplement', # 1B000..1B0FF
- 'Kana Extended-A', # 1B100..1B12F
- 'Small Kana Extension', # 1B130..1B16F
- 'Nushu', # 1B170..1B2FF
- 'No_Block', # 1B300..1BBFF
- 'Duployan', # 1BC00..1BC9F
- 'Shorthand Format Controls', # 1BCA0..1BCAF
- 'No_Block', # 1BCB0..1CFFF
- 'Byzantine Musical Symbols', # 1D000..1D0FF
- 'Musical Symbols', # 1D100..1D1FF
- 'Ancient Greek Musical Notation', # 1D200..1D24F
- 'No_Block', # 1D250..1D2DF
- 'Mayan Numerals', # 1D2E0..1D2FF
- 'Tai Xuan Jing Symbols', # 1D300..1D35F
- 'Counting Rod Numerals', # 1D360..1D37F
- 'No_Block', # 1D380..1D3FF
- 'Mathematical Alphanumeric Symbols', # 1D400..1D7FF
- 'Sutton SignWriting', # 1D800..1DAAF
- 'No_Block', # 1DAB0..1DFFF
- 'Glagolitic Supplement', # 1E000..1E02F
- 'No_Block', # 1E030..1E0FF
- 'Nyiakeng Puachue Hmong', # 1E100..1E14F
- 'No_Block', # 1E150..1E2BF
- 'Wancho', # 1E2C0..1E2FF
- 'No_Block', # 1E300..1E7FF
- 'Mende Kikakui', # 1E800..1E8DF
- 'No_Block', # 1E8E0..1E8FF
- 'Adlam', # 1E900..1E95F
- 'No_Block', # 1E960..1EC6F
- 'Indic Siyaq Numbers', # 1EC70..1ECBF
- 'No_Block', # 1ECC0..1ECFF
- 'Ottoman Siyaq Numbers', # 1ED00..1ED4F
- 'No_Block', # 1ED50..1EDFF
- 'Arabic Mathematical Alphabetic Symbols', # 1EE00..1EEFF
- 'No_Block', # 1EF00..1EFFF
- 'Mahjong Tiles', # 1F000..1F02F
- 'Domino Tiles', # 1F030..1F09F
- 'Playing Cards', # 1F0A0..1F0FF
- 'Enclosed Alphanumeric Supplement', # 1F100..1F1FF
- 'Enclosed Ideographic Supplement', # 1F200..1F2FF
- 'Miscellaneous Symbols and Pictographs', # 1F300..1F5FF
- 'Emoticons', # 1F600..1F64F
- 'Ornamental Dingbats', # 1F650..1F67F
- 'Transport and Map Symbols', # 1F680..1F6FF
- 'Alchemical Symbols', # 1F700..1F77F
- 'Geometric Shapes Extended', # 1F780..1F7FF
- 'Supplemental Arrows-C', # 1F800..1F8FF
- 'Supplemental Symbols and Pictographs', # 1F900..1F9FF
- 'Chess Symbols', # 1FA00..1FA6F
- 'Symbols and Pictographs Extended-A', # 1FA70..1FAFF
- 'Symbols for Legacy Computing', # 1FB00..1FBFF
- 'No_Block', # 1FC00..1FFFF
- 'CJK Unified Ideographs Extension B', # 20000..2A6DF
- 'No_Block', # 2A6E0..2A6FF
- 'CJK Unified Ideographs Extension C', # 2A700..2B73F
- 'CJK Unified Ideographs Extension D', # 2B740..2B81F
- 'CJK Unified Ideographs Extension E', # 2B820..2CEAF
- 'CJK Unified Ideographs Extension F', # 2CEB0..2EBEF
- 'No_Block', # 2EBF0..2F7FF
- 'CJK Compatibility Ideographs Supplement', # 2F800..2FA1F
- 'No_Block', # 2FA20..2FFFF
- 'CJK Unified Ideographs Extension G', # 30000..3134F
- 'No_Block', # 31350..DFFFF
- 'Tags', # E0000..E007F
- 'No_Block', # E0080..E00FF
- 'Variation Selectors Supplement', # E0100..E01EF
- 'No_Block', # E01F0..EFFFF
- 'Supplementary Private Use Area-A', # F0000..FFFFF
- 'Supplementary Private Use Area-B', # 100000..10FFFF
+ 'Basic Latin', # 0000..007F
+ 'Latin-1 Supplement', # 0080..00FF
+ 'Latin Extended-A', # 0100..017F
+ 'Latin Extended-B', # 0180..024F
+ 'IPA Extensions', # 0250..02AF
+ 'Spacing Modifier Letters', # 02B0..02FF
+ 'Combining Diacritical Marks', # 0300..036F
+ 'Greek and Coptic', # 0370..03FF
+ 'Cyrillic', # 0400..04FF
+ 'Cyrillic Supplement', # 0500..052F
+ 'Armenian', # 0530..058F
+ 'Hebrew', # 0590..05FF
+ 'Arabic', # 0600..06FF
+ 'Syriac', # 0700..074F
+ 'Arabic Supplement', # 0750..077F
+ 'Thaana', # 0780..07BF
+ 'NKo', # 07C0..07FF
+ 'Samaritan', # 0800..083F
+ 'Mandaic', # 0840..085F
+ 'Syriac Supplement', # 0860..086F
+ 'Arabic Extended-B', # 0870..089F
+ 'Arabic Extended-A', # 08A0..08FF
+ 'Devanagari', # 0900..097F
+ 'Bengali', # 0980..09FF
+ 'Gurmukhi', # 0A00..0A7F
+ 'Gujarati', # 0A80..0AFF
+ 'Oriya', # 0B00..0B7F
+ 'Tamil', # 0B80..0BFF
+ 'Telugu', # 0C00..0C7F
+ 'Kannada', # 0C80..0CFF
+ 'Malayalam', # 0D00..0D7F
+ 'Sinhala', # 0D80..0DFF
+ 'Thai', # 0E00..0E7F
+ 'Lao', # 0E80..0EFF
+ 'Tibetan', # 0F00..0FFF
+ 'Myanmar', # 1000..109F
+ 'Georgian', # 10A0..10FF
+ 'Hangul Jamo', # 1100..11FF
+ 'Ethiopic', # 1200..137F
+ 'Ethiopic Supplement', # 1380..139F
+ 'Cherokee', # 13A0..13FF
+ 'Unified Canadian Aboriginal Syllabics', # 1400..167F
+ 'Ogham', # 1680..169F
+ 'Runic', # 16A0..16FF
+ 'Tagalog', # 1700..171F
+ 'Hanunoo', # 1720..173F
+ 'Buhid', # 1740..175F
+ 'Tagbanwa', # 1760..177F
+ 'Khmer', # 1780..17FF
+ 'Mongolian', # 1800..18AF
+ 'Unified Canadian Aboriginal Syllabics Extended', # 18B0..18FF
+ 'Limbu', # 1900..194F
+ 'Tai Le', # 1950..197F
+ 'New Tai Lue', # 1980..19DF
+ 'Khmer Symbols', # 19E0..19FF
+ 'Buginese', # 1A00..1A1F
+ 'Tai Tham', # 1A20..1AAF
+ 'Combining Diacritical Marks Extended', # 1AB0..1AFF
+ 'Balinese', # 1B00..1B7F
+ 'Sundanese', # 1B80..1BBF
+ 'Batak', # 1BC0..1BFF
+ 'Lepcha', # 1C00..1C4F
+ 'Ol Chiki', # 1C50..1C7F
+ 'Cyrillic Extended-C', # 1C80..1C8F
+ 'Georgian Extended', # 1C90..1CBF
+ 'Sundanese Supplement', # 1CC0..1CCF
+ 'Vedic Extensions', # 1CD0..1CFF
+ 'Phonetic Extensions', # 1D00..1D7F
+ 'Phonetic Extensions Supplement', # 1D80..1DBF
+ 'Combining Diacritical Marks Supplement', # 1DC0..1DFF
+ 'Latin Extended Additional', # 1E00..1EFF
+ 'Greek Extended', # 1F00..1FFF
+ 'General Punctuation', # 2000..206F
+ 'Superscripts and Subscripts', # 2070..209F
+ 'Currency Symbols', # 20A0..20CF
+ 'Combining Diacritical Marks for Symbols', # 20D0..20FF
+ 'Letterlike Symbols', # 2100..214F
+ 'Number Forms', # 2150..218F
+ 'Arrows', # 2190..21FF
+ 'Mathematical Operators', # 2200..22FF
+ 'Miscellaneous Technical', # 2300..23FF
+ 'Control Pictures', # 2400..243F
+ 'Optical Character Recognition', # 2440..245F
+ 'Enclosed Alphanumerics', # 2460..24FF
+ 'Box Drawing', # 2500..257F
+ 'Block Elements', # 2580..259F
+ 'Geometric Shapes', # 25A0..25FF
+ 'Miscellaneous Symbols', # 2600..26FF
+ 'Dingbats', # 2700..27BF
+ 'Miscellaneous Mathematical Symbols-A', # 27C0..27EF
+ 'Supplemental Arrows-A', # 27F0..27FF
+ 'Braille Patterns', # 2800..28FF
+ 'Supplemental Arrows-B', # 2900..297F
+ 'Miscellaneous Mathematical Symbols-B', # 2980..29FF
+ 'Supplemental Mathematical Operators', # 2A00..2AFF
+ 'Miscellaneous Symbols and Arrows', # 2B00..2BFF
+ 'Glagolitic', # 2C00..2C5F
+ 'Latin Extended-C', # 2C60..2C7F
+ 'Coptic', # 2C80..2CFF
+ 'Georgian Supplement', # 2D00..2D2F
+ 'Tifinagh', # 2D30..2D7F
+ 'Ethiopic Extended', # 2D80..2DDF
+ 'Cyrillic Extended-A', # 2DE0..2DFF
+ 'Supplemental Punctuation', # 2E00..2E7F
+ 'CJK Radicals Supplement', # 2E80..2EFF
+ 'Kangxi Radicals', # 2F00..2FDF
+ 'No_Block', # 2FE0..2FEF
+ 'Ideographic Description Characters', # 2FF0..2FFF
+ 'CJK Symbols and Punctuation', # 3000..303F
+ 'Hiragana', # 3040..309F
+ 'Katakana', # 30A0..30FF
+ 'Bopomofo', # 3100..312F
+ 'Hangul Compatibility Jamo', # 3130..318F
+ 'Kanbun', # 3190..319F
+ 'Bopomofo Extended', # 31A0..31BF
+ 'CJK Strokes', # 31C0..31EF
+ 'Katakana Phonetic Extensions', # 31F0..31FF
+ 'Enclosed CJK Letters and Months', # 3200..32FF
+ 'CJK Compatibility', # 3300..33FF
+ 'CJK Unified Ideographs Extension A', # 3400..4DBF
+ 'Yijing Hexagram Symbols', # 4DC0..4DFF
+ 'CJK Unified Ideographs', # 4E00..9FFF
+ 'Yi Syllables', # A000..A48F
+ 'Yi Radicals', # A490..A4CF
+ 'Lisu', # A4D0..A4FF
+ 'Vai', # A500..A63F
+ 'Cyrillic Extended-B', # A640..A69F
+ 'Bamum', # A6A0..A6FF
+ 'Modifier Tone Letters', # A700..A71F
+ 'Latin Extended-D', # A720..A7FF
+ 'Syloti Nagri', # A800..A82F
+ 'Common Indic Number Forms', # A830..A83F
+ 'Phags-pa', # A840..A87F
+ 'Saurashtra', # A880..A8DF
+ 'Devanagari Extended', # A8E0..A8FF
+ 'Kayah Li', # A900..A92F
+ 'Rejang', # A930..A95F
+ 'Hangul Jamo Extended-A', # A960..A97F
+ 'Javanese', # A980..A9DF
+ 'Myanmar Extended-B', # A9E0..A9FF
+ 'Cham', # AA00..AA5F
+ 'Myanmar Extended-A', # AA60..AA7F
+ 'Tai Viet', # AA80..AADF
+ 'Meetei Mayek Extensions', # AAE0..AAFF
+ 'Ethiopic Extended-A', # AB00..AB2F
+ 'Latin Extended-E', # AB30..AB6F
+ 'Cherokee Supplement', # AB70..ABBF
+ 'Meetei Mayek', # ABC0..ABFF
+ 'Hangul Syllables', # AC00..D7AF
+ 'Hangul Jamo Extended-B', # D7B0..D7FF
+ 'High Surrogates', # D800..DB7F
+ 'High Private Use Surrogates', # DB80..DBFF
+ 'Low Surrogates', # DC00..DFFF
+ 'Private Use Area', # E000..F8FF
+ 'CJK Compatibility Ideographs', # F900..FAFF
+ 'Alphabetic Presentation Forms', # FB00..FB4F
+ 'Arabic Presentation Forms-A', # FB50..FDFF
+ 'Variation Selectors', # FE00..FE0F
+ 'Vertical Forms', # FE10..FE1F
+ 'Combining Half Marks', # FE20..FE2F
+ 'CJK Compatibility Forms', # FE30..FE4F
+ 'Small Form Variants', # FE50..FE6F
+ 'Arabic Presentation Forms-B', # FE70..FEFF
+ 'Halfwidth and Fullwidth Forms', # FF00..FFEF
+ 'Specials', # FFF0..FFFF
+ 'Linear B Syllabary', # 10000..1007F
+ 'Linear B Ideograms', # 10080..100FF
+ 'Aegean Numbers', # 10100..1013F
+ 'Ancient Greek Numbers', # 10140..1018F
+ 'Ancient Symbols', # 10190..101CF
+ 'Phaistos Disc', # 101D0..101FF
+ 'No_Block', # 10200..1027F
+ 'Lycian', # 10280..1029F
+ 'Carian', # 102A0..102DF
+ 'Coptic Epact Numbers', # 102E0..102FF
+ 'Old Italic', # 10300..1032F
+ 'Gothic', # 10330..1034F
+ 'Old Permic', # 10350..1037F
+ 'Ugaritic', # 10380..1039F
+ 'Old Persian', # 103A0..103DF
+ 'No_Block', # 103E0..103FF
+ 'Deseret', # 10400..1044F
+ 'Shavian', # 10450..1047F
+ 'Osmanya', # 10480..104AF
+ 'Osage', # 104B0..104FF
+ 'Elbasan', # 10500..1052F
+ 'Caucasian Albanian', # 10530..1056F
+ 'Vithkuqi', # 10570..105BF
+ 'No_Block', # 105C0..105FF
+ 'Linear A', # 10600..1077F
+ 'Latin Extended-F', # 10780..107BF
+ 'No_Block', # 107C0..107FF
+ 'Cypriot Syllabary', # 10800..1083F
+ 'Imperial Aramaic', # 10840..1085F
+ 'Palmyrene', # 10860..1087F
+ 'Nabataean', # 10880..108AF
+ 'No_Block', # 108B0..108DF
+ 'Hatran', # 108E0..108FF
+ 'Phoenician', # 10900..1091F
+ 'Lydian', # 10920..1093F
+ 'No_Block', # 10940..1097F
+ 'Meroitic Hieroglyphs', # 10980..1099F
+ 'Meroitic Cursive', # 109A0..109FF
+ 'Kharoshthi', # 10A00..10A5F
+ 'Old South Arabian', # 10A60..10A7F
+ 'Old North Arabian', # 10A80..10A9F
+ 'No_Block', # 10AA0..10ABF
+ 'Manichaean', # 10AC0..10AFF
+ 'Avestan', # 10B00..10B3F
+ 'Inscriptional Parthian', # 10B40..10B5F
+ 'Inscriptional Pahlavi', # 10B60..10B7F
+ 'Psalter Pahlavi', # 10B80..10BAF
+ 'No_Block', # 10BB0..10BFF
+ 'Old Turkic', # 10C00..10C4F
+ 'No_Block', # 10C50..10C7F
+ 'Old Hungarian', # 10C80..10CFF
+ 'Hanifi Rohingya', # 10D00..10D3F
+ 'No_Block', # 10D40..10E5F
+ 'Rumi Numeral Symbols', # 10E60..10E7F
+ 'Yezidi', # 10E80..10EBF
+ 'No_Block', # 10EC0..10EFF
+ 'Old Sogdian', # 10F00..10F2F
+ 'Sogdian', # 10F30..10F6F
+ 'Old Uyghur', # 10F70..10FAF
+ 'Chorasmian', # 10FB0..10FDF
+ 'Elymaic', # 10FE0..10FFF
+ 'Brahmi', # 11000..1107F
+ 'Kaithi', # 11080..110CF
+ 'Sora Sompeng', # 110D0..110FF
+ 'Chakma', # 11100..1114F
+ 'Mahajani', # 11150..1117F
+ 'Sharada', # 11180..111DF
+ 'Sinhala Archaic Numbers', # 111E0..111FF
+ 'Khojki', # 11200..1124F
+ 'No_Block', # 11250..1127F
+ 'Multani', # 11280..112AF
+ 'Khudawadi', # 112B0..112FF
+ 'Grantha', # 11300..1137F
+ 'No_Block', # 11380..113FF
+ 'Newa', # 11400..1147F
+ 'Tirhuta', # 11480..114DF
+ 'No_Block', # 114E0..1157F
+ 'Siddham', # 11580..115FF
+ 'Modi', # 11600..1165F
+ 'Mongolian Supplement', # 11660..1167F
+ 'Takri', # 11680..116CF
+ 'No_Block', # 116D0..116FF
+ 'Ahom', # 11700..1174F
+ 'No_Block', # 11750..117FF
+ 'Dogra', # 11800..1184F
+ 'No_Block', # 11850..1189F
+ 'Warang Citi', # 118A0..118FF
+ 'Dives Akuru', # 11900..1195F
+ 'No_Block', # 11960..1199F
+ 'Nandinagari', # 119A0..119FF
+ 'Zanabazar Square', # 11A00..11A4F
+ 'Soyombo', # 11A50..11AAF
+ 'Unified Canadian Aboriginal Syllabics Extended-A', # 11AB0..11ABF
+ 'Pau Cin Hau', # 11AC0..11AFF
+ 'No_Block', # 11B00..11BFF
+ 'Bhaiksuki', # 11C00..11C6F
+ 'Marchen', # 11C70..11CBF
+ 'No_Block', # 11CC0..11CFF
+ 'Masaram Gondi', # 11D00..11D5F
+ 'Gunjala Gondi', # 11D60..11DAF
+ 'No_Block', # 11DB0..11EDF
+ 'Makasar', # 11EE0..11EFF
+ 'No_Block', # 11F00..11FAF
+ 'Lisu Supplement', # 11FB0..11FBF
+ 'Tamil Supplement', # 11FC0..11FFF
+ 'Cuneiform', # 12000..123FF
+ 'Cuneiform Numbers and Punctuation', # 12400..1247F
+ 'Early Dynastic Cuneiform', # 12480..1254F
+ 'No_Block', # 12550..12F8F
+ 'Cypro-Minoan', # 12F90..12FFF
+ 'Egyptian Hieroglyphs', # 13000..1342F
+ 'Egyptian Hieroglyph Format Controls', # 13430..1343F
+ 'No_Block', # 13440..143FF
+ 'Anatolian Hieroglyphs', # 14400..1467F
+ 'No_Block', # 14680..167FF
+ 'Bamum Supplement', # 16800..16A3F
+ 'Mro', # 16A40..16A6F
+ 'Tangsa', # 16A70..16ACF
+ 'Bassa Vah', # 16AD0..16AFF
+ 'Pahawh Hmong', # 16B00..16B8F
+ 'No_Block', # 16B90..16E3F
+ 'Medefaidrin', # 16E40..16E9F
+ 'No_Block', # 16EA0..16EFF
+ 'Miao', # 16F00..16F9F
+ 'No_Block', # 16FA0..16FDF
+ 'Ideographic Symbols and Punctuation', # 16FE0..16FFF
+ 'Tangut', # 17000..187FF
+ 'Tangut Components', # 18800..18AFF
+ 'Khitan Small Script', # 18B00..18CFF
+ 'Tangut Supplement', # 18D00..18D7F
+ 'No_Block', # 18D80..1AFEF
+ 'Kana Extended-B', # 1AFF0..1AFFF
+ 'Kana Supplement', # 1B000..1B0FF
+ 'Kana Extended-A', # 1B100..1B12F
+ 'Small Kana Extension', # 1B130..1B16F
+ 'Nushu', # 1B170..1B2FF
+ 'No_Block', # 1B300..1BBFF
+ 'Duployan', # 1BC00..1BC9F
+ 'Shorthand Format Controls', # 1BCA0..1BCAF
+ 'No_Block', # 1BCB0..1CEFF
+ 'Znamenny Musical Notation', # 1CF00..1CFCF
+ 'No_Block', # 1CFD0..1CFFF
+ 'Byzantine Musical Symbols', # 1D000..1D0FF
+ 'Musical Symbols', # 1D100..1D1FF
+ 'Ancient Greek Musical Notation', # 1D200..1D24F
+ 'No_Block', # 1D250..1D2DF
+ 'Mayan Numerals', # 1D2E0..1D2FF
+ 'Tai Xuan Jing Symbols', # 1D300..1D35F
+ 'Counting Rod Numerals', # 1D360..1D37F
+ 'No_Block', # 1D380..1D3FF
+ 'Mathematical Alphanumeric Symbols', # 1D400..1D7FF
+ 'Sutton SignWriting', # 1D800..1DAAF
+ 'No_Block', # 1DAB0..1DEFF
+ 'Latin Extended-G', # 1DF00..1DFFF
+ 'Glagolitic Supplement', # 1E000..1E02F
+ 'No_Block', # 1E030..1E0FF
+ 'Nyiakeng Puachue Hmong', # 1E100..1E14F
+ 'No_Block', # 1E150..1E28F
+ 'Toto', # 1E290..1E2BF
+ 'Wancho', # 1E2C0..1E2FF
+ 'No_Block', # 1E300..1E7DF
+ 'Ethiopic Extended-B', # 1E7E0..1E7FF
+ 'Mende Kikakui', # 1E800..1E8DF
+ 'No_Block', # 1E8E0..1E8FF
+ 'Adlam', # 1E900..1E95F
+ 'No_Block', # 1E960..1EC6F
+ 'Indic Siyaq Numbers', # 1EC70..1ECBF
+ 'No_Block', # 1ECC0..1ECFF
+ 'Ottoman Siyaq Numbers', # 1ED00..1ED4F
+ 'No_Block', # 1ED50..1EDFF
+ 'Arabic Mathematical Alphabetic Symbols', # 1EE00..1EEFF
+ 'No_Block', # 1EF00..1EFFF
+ 'Mahjong Tiles', # 1F000..1F02F
+ 'Domino Tiles', # 1F030..1F09F
+ 'Playing Cards', # 1F0A0..1F0FF
+ 'Enclosed Alphanumeric Supplement', # 1F100..1F1FF
+ 'Enclosed Ideographic Supplement', # 1F200..1F2FF
+ 'Miscellaneous Symbols and Pictographs', # 1F300..1F5FF
+ 'Emoticons', # 1F600..1F64F
+ 'Ornamental Dingbats', # 1F650..1F67F
+ 'Transport and Map Symbols', # 1F680..1F6FF
+ 'Alchemical Symbols', # 1F700..1F77F
+ 'Geometric Shapes Extended', # 1F780..1F7FF
+ 'Supplemental Arrows-C', # 1F800..1F8FF
+ 'Supplemental Symbols and Pictographs', # 1F900..1F9FF
+ 'Chess Symbols', # 1FA00..1FA6F
+ 'Symbols and Pictographs Extended-A', # 1FA70..1FAFF
+ 'Symbols for Legacy Computing', # 1FB00..1FBFF
+ 'No_Block', # 1FC00..1FFFF
+ 'CJK Unified Ideographs Extension B', # 20000..2A6DF
+ 'No_Block', # 2A6E0..2A6FF
+ 'CJK Unified Ideographs Extension C', # 2A700..2B73F
+ 'CJK Unified Ideographs Extension D', # 2B740..2B81F
+ 'CJK Unified Ideographs Extension E', # 2B820..2CEAF
+ 'CJK Unified Ideographs Extension F', # 2CEB0..2EBEF
+ 'No_Block', # 2EBF0..2F7FF
+ 'CJK Compatibility Ideographs Supplement', # 2F800..2FA1F
+ 'No_Block', # 2FA20..2FFFF
+ 'CJK Unified Ideographs Extension G', # 30000..3134F
+ 'No_Block', # 31350..DFFFF
+ 'Tags', # E0000..E007F
+ 'No_Block', # E0080..E00FF
+ 'Variation Selectors Supplement', # E0100..E01EF
+ 'No_Block', # E01F0..EFFFF
+ 'Supplementary Private Use Area-A', # F0000..FFFFF
+ 'Supplementary Private Use Area-B', # 100000..10FFFF
]
diff --git a/Lib/fontTools/unicodedata/OTTags.py b/Lib/fontTools/unicodedata/OTTags.py
index 39226805..a9d8cd1c 100644
--- a/Lib/fontTools/unicodedata/OTTags.py
+++ b/Lib/fontTools/unicodedata/OTTags.py
@@ -11,6 +11,10 @@
DEFAULT_SCRIPT = "DFLT"
+SCRIPT_ALIASES = {
+ "jamo": "hang",
+}
+
SCRIPT_EXCEPTIONS = {
"Hira": "kana",
"Hrkt": "kana",
diff --git a/Lib/fontTools/unicodedata/ScriptExtensions.py b/Lib/fontTools/unicodedata/ScriptExtensions.py
index b4e09cd2..b078c13e 100644
--- a/Lib/fontTools/unicodedata/ScriptExtensions.py
+++ b/Lib/fontTools/unicodedata/ScriptExtensions.py
@@ -4,9 +4,9 @@
# Source: https://unicode.org/Public/UNIDATA/ScriptExtensions.txt
# License: http://unicode.org/copyright.html#License
#
-# ScriptExtensions-13.0.0.txt
-# Date: 2020-01-22, 00:07:43 GMT
-# © 2020 Unicode®, Inc.
+# ScriptExtensions-14.0.0.txt
+# Date: 2021-06-04, 02:19:38 GMT
+# © 2021 Unicode®, Inc.
# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.
# For terms of use, see http://www.unicode.org/terms_of_use.html
#
@@ -17,10 +17,10 @@
# with more than one script, but with a limited number of scripts.
# For each code point, there is one or more property values. Each such value is a Script property value.
# For more information, see:
-# UAX #24, Unicode Script Property: http://www.unicode.org/reports/tr24/
+# UAX #24, Unicode Script Property: https://www.unicode.org/reports/tr24/
# Especially the sections:
-# http://www.unicode.org/reports/tr24/#Assignment_Script_Values
-# http://www.unicode.org/reports/tr24/#Assignment_ScriptX_Values
+# https://www.unicode.org/reports/tr24/#Assignment_Script_Values
+# https://www.unicode.org/reports/tr24/#Assignment_ScriptX_Values
#
# Each Script_Extensions value in this file consists of a set
# of one or more abbreviated Script property values. The ordering of the
@@ -53,14 +53,14 @@ RANGES = [
0x0485, # .. 0x0486 ; {'Cyrl', 'Latn'}
0x0487, # .. 0x0487 ; {'Cyrl', 'Glag'}
0x0488, # .. 0x060B ; None
- 0x060C, # .. 0x060C ; {'Arab', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
+ 0x060C, # .. 0x060C ; {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
0x060D, # .. 0x061A ; None
- 0x061B, # .. 0x061B ; {'Arab', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
+ 0x061B, # .. 0x061B ; {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
0x061C, # .. 0x061C ; {'Arab', 'Syrc', 'Thaa'}
0x061D, # .. 0x061E ; None
- 0x061F, # .. 0x061F ; {'Arab', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
+ 0x061F, # .. 0x061F ; {'Adlm', 'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
0x0620, # .. 0x063F ; None
- 0x0640, # .. 0x0640 ; {'Adlm', 'Arab', 'Mand', 'Mani', 'Phlp', 'Rohg', 'Sogd', 'Syrc'}
+ 0x0640, # .. 0x0640 ; {'Adlm', 'Arab', 'Mand', 'Mani', 'Ougr', 'Phlp', 'Rohg', 'Sogd', 'Syrc'}
0x0641, # .. 0x064A ; None
0x064B, # .. 0x0655 ; {'Arab', 'Syrc'}
0x0656, # .. 0x065F ; None
@@ -129,7 +129,9 @@ RANGES = [
0x1DC0, # .. 0x1DC1 ; {'Grek'}
0x1DC2, # .. 0x1DF7 ; None
0x1DF8, # .. 0x1DF8 ; {'Cyrl', 'Syrc'}
- 0x1DF9, # .. 0x202E ; None
+ 0x1DF9, # .. 0x1DF9 ; None
+ 0x1DFA, # .. 0x1DFA ; {'Syrc'}
+ 0x1DFB, # .. 0x202E ; None
0x202F, # .. 0x202F ; {'Latn', 'Mong'}
0x2030, # .. 0x20EF ; None
0x20F0, # .. 0x20F0 ; {'Deva', 'Gran', 'Latn'}
@@ -197,7 +199,9 @@ RANGES = [
0xA92E, # .. 0xA92E ; {'Kali', 'Latn', 'Mymr'}
0xA92F, # .. 0xA9CE ; None
0xA9CF, # .. 0xA9CF ; {'Bugi', 'Java'}
- 0xA9D0, # .. 0xFDF1 ; None
+ 0xA9D0, # .. 0xFD3D ; None
+ 0xFD3E, # .. 0xFD3F ; {'Arab', 'Nkoo'}
+ 0xFD40, # .. 0xFDF1 ; None
0xFDF2, # .. 0xFDF2 ; {'Arab', 'Thaa'}
0xFDF3, # .. 0xFDFC ; None
0xFDFD, # .. 0xFDFD ; {'Arab', 'Thaa'}
@@ -210,14 +214,17 @@ RANGES = [
0xFF71, # .. 0xFF9D ; None
0xFF9E, # .. 0xFF9F ; {'Hira', 'Kana'}
0xFFA0, # .. 0x100FF ; None
- 0x10100, # .. 0x10102 ; {'Cprt', 'Linb'}
+ 0x10100, # .. 0x10101 ; {'Cpmn', 'Cprt', 'Linb'}
+ 0x10102, # .. 0x10102 ; {'Cprt', 'Linb'}
0x10103, # .. 0x10106 ; None
0x10107, # .. 0x10133 ; {'Cprt', 'Lina', 'Linb'}
0x10134, # .. 0x10136 ; None
0x10137, # .. 0x1013F ; {'Cprt', 'Linb'}
0x10140, # .. 0x102DF ; None
0x102E0, # .. 0x102FB ; {'Arab', 'Copt'}
- 0x102FC, # .. 0x11300 ; None
+ 0x102FC, # .. 0x10AF1 ; None
+ 0x10AF2, # .. 0x10AF2 ; {'Mani', 'Ougr'}
+ 0x10AF3, # .. 0x11300 ; None
0x11301, # .. 0x11301 ; {'Gran', 'Taml'}
0x11302, # .. 0x11302 ; None
0x11303, # .. 0x11303 ; {'Gran', 'Taml'}
@@ -249,14 +256,14 @@ VALUES = [
{'Cyrl', 'Latn'}, # 0485..0486
{'Cyrl', 'Glag'}, # 0487..0487
None, # 0488..060B
- {'Arab', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 060C..060C
+ {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 060C..060C
None, # 060D..061A
- {'Arab', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 061B..061B
+ {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 061B..061B
{'Arab', 'Syrc', 'Thaa'}, # 061C..061C
None, # 061D..061E
- {'Arab', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 061F..061F
+ {'Adlm', 'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 061F..061F
None, # 0620..063F
- {'Adlm', 'Arab', 'Mand', 'Mani', 'Phlp', 'Rohg', 'Sogd', 'Syrc'}, # 0640..0640
+ {'Adlm', 'Arab', 'Mand', 'Mani', 'Ougr', 'Phlp', 'Rohg', 'Sogd', 'Syrc'}, # 0640..0640
None, # 0641..064A
{'Arab', 'Syrc'}, # 064B..0655
None, # 0656..065F
@@ -325,7 +332,9 @@ VALUES = [
{'Grek'}, # 1DC0..1DC1
None, # 1DC2..1DF7
{'Cyrl', 'Syrc'}, # 1DF8..1DF8
- None, # 1DF9..202E
+ None, # 1DF9..1DF9
+ {'Syrc'}, # 1DFA..1DFA
+ None, # 1DFB..202E
{'Latn', 'Mong'}, # 202F..202F
None, # 2030..20EF
{'Deva', 'Gran', 'Latn'}, # 20F0..20F0
@@ -393,7 +402,9 @@ VALUES = [
{'Kali', 'Latn', 'Mymr'}, # A92E..A92E
None, # A92F..A9CE
{'Bugi', 'Java'}, # A9CF..A9CF
- None, # A9D0..FDF1
+ None, # A9D0..FD3D
+ {'Arab', 'Nkoo'}, # FD3E..FD3F
+ None, # FD40..FDF1
{'Arab', 'Thaa'}, # FDF2..FDF2
None, # FDF3..FDFC
{'Arab', 'Thaa'}, # FDFD..FDFD
@@ -406,14 +417,17 @@ VALUES = [
None, # FF71..FF9D
{'Hira', 'Kana'}, # FF9E..FF9F
None, # FFA0..100FF
- {'Cprt', 'Linb'}, # 10100..10102
+ {'Cpmn', 'Cprt', 'Linb'}, # 10100..10101
+ {'Cprt', 'Linb'}, # 10102..10102
None, # 10103..10106
{'Cprt', 'Lina', 'Linb'}, # 10107..10133
None, # 10134..10136
{'Cprt', 'Linb'}, # 10137..1013F
None, # 10140..102DF
{'Arab', 'Copt'}, # 102E0..102FB
- None, # 102FC..11300
+ None, # 102FC..10AF1
+ {'Mani', 'Ougr'}, # 10AF2..10AF2
+ None, # 10AF3..11300
{'Gran', 'Taml'}, # 11301..11301
None, # 11302..11302
{'Gran', 'Taml'}, # 11303..11303
diff --git a/Lib/fontTools/unicodedata/Scripts.py b/Lib/fontTools/unicodedata/Scripts.py
index 12f9a0e3..18cada93 100644
--- a/Lib/fontTools/unicodedata/Scripts.py
+++ b/Lib/fontTools/unicodedata/Scripts.py
@@ -4,19 +4,19 @@
# Source: https://unicode.org/Public/UNIDATA/Scripts.txt
# License: http://unicode.org/copyright.html#License
#
-# Scripts-13.0.0.txt
-# Date: 2020-01-22, 00:07:43 GMT
-# © 2020 Unicode®, Inc.
+# Scripts-14.0.0.txt
+# Date: 2021-07-10, 00:35:31 GMT
+# © 2021 Unicode®, Inc.
# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.
# For terms of use, see http://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
# For documentation, see http://www.unicode.org/reports/tr44/
# For more information, see:
-# UAX #24, Unicode Script Property: http://www.unicode.org/reports/tr24/
+# UAX #24, Unicode Script Property: https://www.unicode.org/reports/tr24/
# Especially the sections:
-# http://www.unicode.org/reports/tr24/#Assignment_Script_Values
-# http://www.unicode.org/reports/tr24/#Assignment_ScriptX_Values
+# https://www.unicode.org/reports/tr24/#Assignment_Script_Values
+# https://www.unicode.org/reports/tr24/#Assignment_ScriptX_Values
#
@@ -84,9 +84,7 @@ RANGES = [
0x060C, # .. 0x060C ; Common
0x060D, # .. 0x061A ; Arabic
0x061B, # .. 0x061B ; Common
- 0x061C, # .. 0x061C ; Arabic
- 0x061D, # .. 0x061D ; Unknown
- 0x061E, # .. 0x061E ; Arabic
+ 0x061C, # .. 0x061E ; Arabic
0x061F, # .. 0x061F ; Common
0x0620, # .. 0x063F ; Arabic
0x0640, # .. 0x0640 ; Common
@@ -117,12 +115,12 @@ RANGES = [
0x085E, # .. 0x085E ; Mandaic
0x085F, # .. 0x085F ; Unknown
0x0860, # .. 0x086A ; Syriac
- 0x086B, # .. 0x089F ; Unknown
- 0x08A0, # .. 0x08B4 ; Arabic
- 0x08B5, # .. 0x08B5 ; Unknown
- 0x08B6, # .. 0x08C7 ; Arabic
- 0x08C8, # .. 0x08D2 ; Unknown
- 0x08D3, # .. 0x08E1 ; Arabic
+ 0x086B, # .. 0x086F ; Unknown
+ 0x0870, # .. 0x088E ; Arabic
+ 0x088F, # .. 0x088F ; Unknown
+ 0x0890, # .. 0x0891 ; Arabic
+ 0x0892, # .. 0x0897 ; Unknown
+ 0x0898, # .. 0x08E1 ; Arabic
0x08E2, # .. 0x08E2 ; Common
0x08E3, # .. 0x08FF ; Arabic
0x0900, # .. 0x0950 ; Devanagari
@@ -285,8 +283,8 @@ RANGES = [
0x0C12, # .. 0x0C28 ; Telugu
0x0C29, # .. 0x0C29 ; Unknown
0x0C2A, # .. 0x0C39 ; Telugu
- 0x0C3A, # .. 0x0C3C ; Unknown
- 0x0C3D, # .. 0x0C44 ; Telugu
+ 0x0C3A, # .. 0x0C3B ; Unknown
+ 0x0C3C, # .. 0x0C44 ; Telugu
0x0C45, # .. 0x0C45 ; Unknown
0x0C46, # .. 0x0C48 ; Telugu
0x0C49, # .. 0x0C49 ; Unknown
@@ -295,7 +293,9 @@ RANGES = [
0x0C55, # .. 0x0C56 ; Telugu
0x0C57, # .. 0x0C57 ; Unknown
0x0C58, # .. 0x0C5A ; Telugu
- 0x0C5B, # .. 0x0C5F ; Unknown
+ 0x0C5B, # .. 0x0C5C ; Unknown
+ 0x0C5D, # .. 0x0C5D ; Telugu
+ 0x0C5E, # .. 0x0C5F ; Unknown
0x0C60, # .. 0x0C63 ; Telugu
0x0C64, # .. 0x0C65 ; Unknown
0x0C66, # .. 0x0C6F ; Telugu
@@ -318,8 +318,8 @@ RANGES = [
0x0CCA, # .. 0x0CCD ; Kannada
0x0CCE, # .. 0x0CD4 ; Unknown
0x0CD5, # .. 0x0CD6 ; Kannada
- 0x0CD7, # .. 0x0CDD ; Unknown
- 0x0CDE, # .. 0x0CDE ; Kannada
+ 0x0CD7, # .. 0x0CDC ; Unknown
+ 0x0CDD, # .. 0x0CDE ; Kannada
0x0CDF, # .. 0x0CDF ; Unknown
0x0CE0, # .. 0x0CE3 ; Kannada
0x0CE4, # .. 0x0CE5 ; Unknown
@@ -464,10 +464,9 @@ RANGES = [
0x16EB, # .. 0x16ED ; Common
0x16EE, # .. 0x16F8 ; Runic
0x16F9, # .. 0x16FF ; Unknown
- 0x1700, # .. 0x170C ; Tagalog
- 0x170D, # .. 0x170D ; Unknown
- 0x170E, # .. 0x1714 ; Tagalog
- 0x1715, # .. 0x171F ; Unknown
+ 0x1700, # .. 0x1715 ; Tagalog
+ 0x1716, # .. 0x171E ; Unknown
+ 0x171F, # .. 0x171F ; Tagalog
0x1720, # .. 0x1734 ; Hanunoo
0x1735, # .. 0x1736 ; Common
0x1737, # .. 0x173F ; Unknown
@@ -489,9 +488,7 @@ RANGES = [
0x1802, # .. 0x1803 ; Common
0x1804, # .. 0x1804 ; Mongolian
0x1805, # .. 0x1805 ; Common
- 0x1806, # .. 0x180E ; Mongolian
- 0x180F, # .. 0x180F ; Unknown
- 0x1810, # .. 0x1819 ; Mongolian
+ 0x1806, # .. 0x1819 ; Mongolian
0x181A, # .. 0x181F ; Unknown
0x1820, # .. 0x1878 ; Mongolian
0x1879, # .. 0x187F ; Unknown
@@ -533,12 +530,12 @@ RANGES = [
0x1A9A, # .. 0x1A9F ; Unknown
0x1AA0, # .. 0x1AAD ; Tai_Tham
0x1AAE, # .. 0x1AAF ; Unknown
- 0x1AB0, # .. 0x1AC0 ; Inherited
- 0x1AC1, # .. 0x1AFF ; Unknown
- 0x1B00, # .. 0x1B4B ; Balinese
- 0x1B4C, # .. 0x1B4F ; Unknown
- 0x1B50, # .. 0x1B7C ; Balinese
- 0x1B7D, # .. 0x1B7F ; Unknown
+ 0x1AB0, # .. 0x1ACE ; Inherited
+ 0x1ACF, # .. 0x1AFF ; Unknown
+ 0x1B00, # .. 0x1B4C ; Balinese
+ 0x1B4D, # .. 0x1B4F ; Unknown
+ 0x1B50, # .. 0x1B7E ; Balinese
+ 0x1B7F, # .. 0x1B7F ; Unknown
0x1B80, # .. 0x1BBF ; Sundanese
0x1BC0, # .. 0x1BF3 ; Batak
0x1BF4, # .. 0x1BFB ; Unknown
@@ -580,9 +577,7 @@ RANGES = [
0x1D78, # .. 0x1D78 ; Cyrillic
0x1D79, # .. 0x1DBE ; Latin
0x1DBF, # .. 0x1DBF ; Greek
- 0x1DC0, # .. 0x1DF9 ; Inherited
- 0x1DFA, # .. 0x1DFA ; Unknown
- 0x1DFB, # .. 0x1DFF ; Inherited
+ 0x1DC0, # .. 0x1DFF ; Inherited
0x1E00, # .. 0x1EFF ; Latin
0x1F00, # .. 0x1F15 ; Greek
0x1F16, # .. 0x1F17 ; Unknown
@@ -629,8 +624,8 @@ RANGES = [
0x208F, # .. 0x208F ; Unknown
0x2090, # .. 0x209C ; Latin
0x209D, # .. 0x209F ; Unknown
- 0x20A0, # .. 0x20BF ; Common
- 0x20C0, # .. 0x20CF ; Unknown
+ 0x20A0, # .. 0x20C0 ; Common
+ 0x20C1, # .. 0x20CF ; Unknown
0x20D0, # .. 0x20F0 ; Inherited
0x20F1, # .. 0x20FF ; Unknown
0x2100, # .. 0x2125 ; Common
@@ -656,10 +651,7 @@ RANGES = [
0x2B76, # .. 0x2B95 ; Common
0x2B96, # .. 0x2B96 ; Unknown
0x2B97, # .. 0x2BFF ; Common
- 0x2C00, # .. 0x2C2E ; Glagolitic
- 0x2C2F, # .. 0x2C2F ; Unknown
- 0x2C30, # .. 0x2C5E ; Glagolitic
- 0x2C5F, # .. 0x2C5F ; Unknown
+ 0x2C00, # .. 0x2C5F ; Glagolitic
0x2C60, # .. 0x2C7F ; Latin
0x2C80, # .. 0x2CF3 ; Coptic
0x2CF4, # .. 0x2CF8 ; Unknown
@@ -694,8 +686,8 @@ RANGES = [
0x2DD8, # .. 0x2DDE ; Ethiopic
0x2DDF, # .. 0x2DDF ; Unknown
0x2DE0, # .. 0x2DFF ; Cyrillic
- 0x2E00, # .. 0x2E52 ; Common
- 0x2E53, # .. 0x2E7F ; Unknown
+ 0x2E00, # .. 0x2E5D ; Common
+ 0x2E5E, # .. 0x2E7F ; Unknown
0x2E80, # .. 0x2E99 ; Han
0x2E9A, # .. 0x2E9A ; Unknown
0x2E9B, # .. 0x2EF3 ; Han
@@ -746,8 +738,7 @@ RANGES = [
0x3358, # .. 0x33FF ; Common
0x3400, # .. 0x4DBF ; Han
0x4DC0, # .. 0x4DFF ; Common
- 0x4E00, # .. 0x9FFC ; Han
- 0x9FFD, # .. 0x9FFF ; Unknown
+ 0x4E00, # .. 0x9FFF ; Han
0xA000, # .. 0xA48C ; Yi
0xA48D, # .. 0xA48F ; Unknown
0xA490, # .. 0xA4C6 ; Yi
@@ -761,11 +752,15 @@ RANGES = [
0xA700, # .. 0xA721 ; Common
0xA722, # .. 0xA787 ; Latin
0xA788, # .. 0xA78A ; Common
- 0xA78B, # .. 0xA7BF ; Latin
- 0xA7C0, # .. 0xA7C1 ; Unknown
- 0xA7C2, # .. 0xA7CA ; Latin
- 0xA7CB, # .. 0xA7F4 ; Unknown
- 0xA7F5, # .. 0xA7FF ; Latin
+ 0xA78B, # .. 0xA7CA ; Latin
+ 0xA7CB, # .. 0xA7CF ; Unknown
+ 0xA7D0, # .. 0xA7D1 ; Latin
+ 0xA7D2, # .. 0xA7D2 ; Unknown
+ 0xA7D3, # .. 0xA7D3 ; Latin
+ 0xA7D4, # .. 0xA7D4 ; Unknown
+ 0xA7D5, # .. 0xA7D9 ; Latin
+ 0xA7DA, # .. 0xA7F1 ; Unknown
+ 0xA7F2, # .. 0xA7FF ; Latin
0xA800, # .. 0xA82C ; Syloti_Nagri
0xA82D, # .. 0xA82F ; Unknown
0xA830, # .. 0xA839 ; Common
@@ -853,17 +848,17 @@ RANGES = [
0xFB43, # .. 0xFB44 ; Hebrew
0xFB45, # .. 0xFB45 ; Unknown
0xFB46, # .. 0xFB4F ; Hebrew
- 0xFB50, # .. 0xFBC1 ; Arabic
- 0xFBC2, # .. 0xFBD2 ; Unknown
+ 0xFB50, # .. 0xFBC2 ; Arabic
+ 0xFBC3, # .. 0xFBD2 ; Unknown
0xFBD3, # .. 0xFD3D ; Arabic
0xFD3E, # .. 0xFD3F ; Common
- 0xFD40, # .. 0xFD4F ; Unknown
- 0xFD50, # .. 0xFD8F ; Arabic
+ 0xFD40, # .. 0xFD8F ; Arabic
0xFD90, # .. 0xFD91 ; Unknown
0xFD92, # .. 0xFDC7 ; Arabic
- 0xFDC8, # .. 0xFDEF ; Unknown
- 0xFDF0, # .. 0xFDFD ; Arabic
- 0xFDFE, # .. 0xFDFF ; Unknown
+ 0xFDC8, # .. 0xFDCE ; Unknown
+ 0xFDCF, # .. 0xFDCF ; Arabic
+ 0xFDD0, # .. 0xFDEF ; Unknown
+ 0xFDF0, # .. 0xFDFF ; Arabic
0xFE00, # .. 0xFE0F ; Inherited
0xFE10, # .. 0xFE19 ; Common
0xFE1A, # .. 0xFE1F ; Unknown
@@ -970,13 +965,34 @@ RANGES = [
0x10530, # .. 0x10563 ; Caucasian_Albanian
0x10564, # .. 0x1056E ; Unknown
0x1056F, # .. 0x1056F ; Caucasian_Albanian
- 0x10570, # .. 0x105FF ; Unknown
+ 0x10570, # .. 0x1057A ; Vithkuqi
+ 0x1057B, # .. 0x1057B ; Unknown
+ 0x1057C, # .. 0x1058A ; Vithkuqi
+ 0x1058B, # .. 0x1058B ; Unknown
+ 0x1058C, # .. 0x10592 ; Vithkuqi
+ 0x10593, # .. 0x10593 ; Unknown
+ 0x10594, # .. 0x10595 ; Vithkuqi
+ 0x10596, # .. 0x10596 ; Unknown
+ 0x10597, # .. 0x105A1 ; Vithkuqi
+ 0x105A2, # .. 0x105A2 ; Unknown
+ 0x105A3, # .. 0x105B1 ; Vithkuqi
+ 0x105B2, # .. 0x105B2 ; Unknown
+ 0x105B3, # .. 0x105B9 ; Vithkuqi
+ 0x105BA, # .. 0x105BA ; Unknown
+ 0x105BB, # .. 0x105BC ; Vithkuqi
+ 0x105BD, # .. 0x105FF ; Unknown
0x10600, # .. 0x10736 ; Linear_A
0x10737, # .. 0x1073F ; Unknown
0x10740, # .. 0x10755 ; Linear_A
0x10756, # .. 0x1075F ; Unknown
0x10760, # .. 0x10767 ; Linear_A
- 0x10768, # .. 0x107FF ; Unknown
+ 0x10768, # .. 0x1077F ; Unknown
+ 0x10780, # .. 0x10785 ; Latin
+ 0x10786, # .. 0x10786 ; Unknown
+ 0x10787, # .. 0x107B0 ; Latin
+ 0x107B1, # .. 0x107B1 ; Unknown
+ 0x107B2, # .. 0x107BA ; Latin
+ 0x107BB, # .. 0x107FF ; Unknown
0x10800, # .. 0x10805 ; Cypriot
0x10806, # .. 0x10807 ; Unknown
0x10808, # .. 0x10808 ; Cypriot
@@ -1074,18 +1090,20 @@ RANGES = [
0x10F00, # .. 0x10F27 ; Old_Sogdian
0x10F28, # .. 0x10F2F ; Unknown
0x10F30, # .. 0x10F59 ; Sogdian
- 0x10F5A, # .. 0x10FAF ; Unknown
+ 0x10F5A, # .. 0x10F6F ; Unknown
+ 0x10F70, # .. 0x10F89 ; Old_Uyghur
+ 0x10F8A, # .. 0x10FAF ; Unknown
0x10FB0, # .. 0x10FCB ; Chorasmian
0x10FCC, # .. 0x10FDF ; Unknown
0x10FE0, # .. 0x10FF6 ; Elymaic
0x10FF7, # .. 0x10FFF ; Unknown
0x11000, # .. 0x1104D ; Brahmi
0x1104E, # .. 0x11051 ; Unknown
- 0x11052, # .. 0x1106F ; Brahmi
- 0x11070, # .. 0x1107E ; Unknown
+ 0x11052, # .. 0x11075 ; Brahmi
+ 0x11076, # .. 0x1107E ; Unknown
0x1107F, # .. 0x1107F ; Brahmi
- 0x11080, # .. 0x110C1 ; Kaithi
- 0x110C2, # .. 0x110CC ; Unknown
+ 0x11080, # .. 0x110C2 ; Kaithi
+ 0x110C3, # .. 0x110CC ; Unknown
0x110CD, # .. 0x110CD ; Kaithi
0x110CE, # .. 0x110CF ; Unknown
0x110D0, # .. 0x110E8 ; Sora_Sompeng
@@ -1169,16 +1187,16 @@ RANGES = [
0x1165A, # .. 0x1165F ; Unknown
0x11660, # .. 0x1166C ; Mongolian
0x1166D, # .. 0x1167F ; Unknown
- 0x11680, # .. 0x116B8 ; Takri
- 0x116B9, # .. 0x116BF ; Unknown
+ 0x11680, # .. 0x116B9 ; Takri
+ 0x116BA, # .. 0x116BF ; Unknown
0x116C0, # .. 0x116C9 ; Takri
0x116CA, # .. 0x116FF ; Unknown
0x11700, # .. 0x1171A ; Ahom
0x1171B, # .. 0x1171C ; Unknown
0x1171D, # .. 0x1172B ; Ahom
0x1172C, # .. 0x1172F ; Unknown
- 0x11730, # .. 0x1173F ; Ahom
- 0x11740, # .. 0x117FF ; Unknown
+ 0x11730, # .. 0x11746 ; Ahom
+ 0x11747, # .. 0x117FF ; Unknown
0x11800, # .. 0x1183B ; Dogra
0x1183C, # .. 0x1189F ; Unknown
0x118A0, # .. 0x118F2 ; Warang_Citi
@@ -1209,7 +1227,8 @@ RANGES = [
0x11A00, # .. 0x11A47 ; Zanabazar_Square
0x11A48, # .. 0x11A4F ; Unknown
0x11A50, # .. 0x11AA2 ; Soyombo
- 0x11AA3, # .. 0x11ABF ; Unknown
+ 0x11AA3, # .. 0x11AAF ; Unknown
+ 0x11AB0, # .. 0x11ABF ; Canadian_Aboriginal
0x11AC0, # .. 0x11AF8 ; Pau_Cin_Hau
0x11AF9, # .. 0x11BFF ; Unknown
0x11C00, # .. 0x11C08 ; Bhaiksuki
@@ -1266,7 +1285,9 @@ RANGES = [
0x12470, # .. 0x12474 ; Cuneiform
0x12475, # .. 0x1247F ; Unknown
0x12480, # .. 0x12543 ; Cuneiform
- 0x12544, # .. 0x12FFF ; Unknown
+ 0x12544, # .. 0x12F8F ; Unknown
+ 0x12F90, # .. 0x12FF2 ; Cypro_Minoan
+ 0x12FF3, # .. 0x12FFF ; Unknown
0x13000, # .. 0x1342E ; Egyptian_Hieroglyphs
0x1342F, # .. 0x1342F ; Unknown
0x13430, # .. 0x13438 ; Egyptian_Hieroglyphs
@@ -1280,7 +1301,10 @@ RANGES = [
0x16A60, # .. 0x16A69 ; Mro
0x16A6A, # .. 0x16A6D ; Unknown
0x16A6E, # .. 0x16A6F ; Mro
- 0x16A70, # .. 0x16ACF ; Unknown
+ 0x16A70, # .. 0x16ABE ; Tangsa
+ 0x16ABF, # .. 0x16ABF ; Unknown
+ 0x16AC0, # .. 0x16AC9 ; Tangsa
+ 0x16ACA, # .. 0x16ACF ; Unknown
0x16AD0, # .. 0x16AED ; Bassa_Vah
0x16AEE, # .. 0x16AEF ; Unknown
0x16AF0, # .. 0x16AF5 ; Bassa_Vah
@@ -1305,7 +1329,7 @@ RANGES = [
0x16FA0, # .. 0x16FDF ; Unknown
0x16FE0, # .. 0x16FE0 ; Tangut
0x16FE1, # .. 0x16FE1 ; Nushu
- 0x16FE2, # .. 0x16FE3 ; Common
+ 0x16FE2, # .. 0x16FE3 ; Han
0x16FE4, # .. 0x16FE4 ; Khitan_Small_Script
0x16FE5, # .. 0x16FEF ; Unknown
0x16FF0, # .. 0x16FF1 ; Han
@@ -1316,10 +1340,17 @@ RANGES = [
0x18B00, # .. 0x18CD5 ; Khitan_Small_Script
0x18CD6, # .. 0x18CFF ; Unknown
0x18D00, # .. 0x18D08 ; Tangut
- 0x18D09, # .. 0x1AFFF ; Unknown
+ 0x18D09, # .. 0x1AFEF ; Unknown
+ 0x1AFF0, # .. 0x1AFF3 ; Katakana
+ 0x1AFF4, # .. 0x1AFF4 ; Unknown
+ 0x1AFF5, # .. 0x1AFFB ; Katakana
+ 0x1AFFC, # .. 0x1AFFC ; Unknown
+ 0x1AFFD, # .. 0x1AFFE ; Katakana
+ 0x1AFFF, # .. 0x1AFFF ; Unknown
0x1B000, # .. 0x1B000 ; Katakana
- 0x1B001, # .. 0x1B11E ; Hiragana
- 0x1B11F, # .. 0x1B14F ; Unknown
+ 0x1B001, # .. 0x1B11F ; Hiragana
+ 0x1B120, # .. 0x1B122 ; Katakana
+ 0x1B123, # .. 0x1B14F ; Unknown
0x1B150, # .. 0x1B152 ; Hiragana
0x1B153, # .. 0x1B163 ; Unknown
0x1B164, # .. 0x1B167 ; Katakana
@@ -1336,7 +1367,13 @@ RANGES = [
0x1BC9A, # .. 0x1BC9B ; Unknown
0x1BC9C, # .. 0x1BC9F ; Duployan
0x1BCA0, # .. 0x1BCA3 ; Common
- 0x1BCA4, # .. 0x1CFFF ; Unknown
+ 0x1BCA4, # .. 0x1CEFF ; Unknown
+ 0x1CF00, # .. 0x1CF2D ; Inherited
+ 0x1CF2E, # .. 0x1CF2F ; Unknown
+ 0x1CF30, # .. 0x1CF46 ; Inherited
+ 0x1CF47, # .. 0x1CF4F ; Unknown
+ 0x1CF50, # .. 0x1CFC3 ; Common
+ 0x1CFC4, # .. 0x1CFFF ; Unknown
0x1D000, # .. 0x1D0F5 ; Common
0x1D0F6, # .. 0x1D0FF ; Unknown
0x1D100, # .. 0x1D126 ; Common
@@ -1349,8 +1386,8 @@ RANGES = [
0x1D185, # .. 0x1D18B ; Inherited
0x1D18C, # .. 0x1D1A9 ; Common
0x1D1AA, # .. 0x1D1AD ; Inherited
- 0x1D1AE, # .. 0x1D1E8 ; Common
- 0x1D1E9, # .. 0x1D1FF ; Unknown
+ 0x1D1AE, # .. 0x1D1EA ; Common
+ 0x1D1EB, # .. 0x1D1FF ; Unknown
0x1D200, # .. 0x1D245 ; Greek
0x1D246, # .. 0x1D2DF ; Unknown
0x1D2E0, # .. 0x1D2F3 ; Common
@@ -1405,7 +1442,9 @@ RANGES = [
0x1DA9B, # .. 0x1DA9F ; SignWriting
0x1DAA0, # .. 0x1DAA0 ; Unknown
0x1DAA1, # .. 0x1DAAF ; SignWriting
- 0x1DAB0, # .. 0x1DFFF ; Unknown
+ 0x1DAB0, # .. 0x1DEFF ; Unknown
+ 0x1DF00, # .. 0x1DF1E ; Latin
+ 0x1DF1F, # .. 0x1DFFF ; Unknown
0x1E000, # .. 0x1E006 ; Glagolitic
0x1E007, # .. 0x1E007 ; Unknown
0x1E008, # .. 0x1E018 ; Glagolitic
@@ -1423,11 +1462,21 @@ RANGES = [
0x1E140, # .. 0x1E149 ; Nyiakeng_Puachue_Hmong
0x1E14A, # .. 0x1E14D ; Unknown
0x1E14E, # .. 0x1E14F ; Nyiakeng_Puachue_Hmong
- 0x1E150, # .. 0x1E2BF ; Unknown
+ 0x1E150, # .. 0x1E28F ; Unknown
+ 0x1E290, # .. 0x1E2AE ; Toto
+ 0x1E2AF, # .. 0x1E2BF ; Unknown
0x1E2C0, # .. 0x1E2F9 ; Wancho
0x1E2FA, # .. 0x1E2FE ; Unknown
0x1E2FF, # .. 0x1E2FF ; Wancho
- 0x1E300, # .. 0x1E7FF ; Unknown
+ 0x1E300, # .. 0x1E7DF ; Unknown
+ 0x1E7E0, # .. 0x1E7E6 ; Ethiopic
+ 0x1E7E7, # .. 0x1E7E7 ; Unknown
+ 0x1E7E8, # .. 0x1E7EB ; Ethiopic
+ 0x1E7EC, # .. 0x1E7EC ; Unknown
+ 0x1E7ED, # .. 0x1E7EE ; Ethiopic
+ 0x1E7EF, # .. 0x1E7EF ; Unknown
+ 0x1E7F0, # .. 0x1E7FE ; Ethiopic
+ 0x1E7FF, # .. 0x1E7FF ; Unknown
0x1E800, # .. 0x1E8C4 ; Mende_Kikakui
0x1E8C5, # .. 0x1E8C6 ; Unknown
0x1E8C7, # .. 0x1E8D6 ; Mende_Kikakui
@@ -1537,8 +1586,8 @@ RANGES = [
0x1F260, # .. 0x1F265 ; Common
0x1F266, # .. 0x1F2FF ; Unknown
0x1F300, # .. 0x1F6D7 ; Common
- 0x1F6D8, # .. 0x1F6DF ; Unknown
- 0x1F6E0, # .. 0x1F6EC ; Common
+ 0x1F6D8, # .. 0x1F6DC ; Unknown
+ 0x1F6DD, # .. 0x1F6EC ; Common
0x1F6ED, # .. 0x1F6EF ; Unknown
0x1F6F0, # .. 0x1F6FC ; Common
0x1F6FD, # .. 0x1F6FF ; Unknown
@@ -1547,7 +1596,9 @@ RANGES = [
0x1F780, # .. 0x1F7D8 ; Common
0x1F7D9, # .. 0x1F7DF ; Unknown
0x1F7E0, # .. 0x1F7EB ; Common
- 0x1F7EC, # .. 0x1F7FF ; Unknown
+ 0x1F7EC, # .. 0x1F7EF ; Unknown
+ 0x1F7F0, # .. 0x1F7F0 ; Common
+ 0x1F7F1, # .. 0x1F7FF ; Unknown
0x1F800, # .. 0x1F80B ; Common
0x1F80C, # .. 0x1F80F ; Unknown
0x1F810, # .. 0x1F847 ; Common
@@ -1560,38 +1611,38 @@ RANGES = [
0x1F8AE, # .. 0x1F8AF ; Unknown
0x1F8B0, # .. 0x1F8B1 ; Common
0x1F8B2, # .. 0x1F8FF ; Unknown
- 0x1F900, # .. 0x1F978 ; Common
- 0x1F979, # .. 0x1F979 ; Unknown
- 0x1F97A, # .. 0x1F9CB ; Common
- 0x1F9CC, # .. 0x1F9CC ; Unknown
- 0x1F9CD, # .. 0x1FA53 ; Common
+ 0x1F900, # .. 0x1FA53 ; Common
0x1FA54, # .. 0x1FA5F ; Unknown
0x1FA60, # .. 0x1FA6D ; Common
0x1FA6E, # .. 0x1FA6F ; Unknown
0x1FA70, # .. 0x1FA74 ; Common
0x1FA75, # .. 0x1FA77 ; Unknown
- 0x1FA78, # .. 0x1FA7A ; Common
- 0x1FA7B, # .. 0x1FA7F ; Unknown
+ 0x1FA78, # .. 0x1FA7C ; Common
+ 0x1FA7D, # .. 0x1FA7F ; Unknown
0x1FA80, # .. 0x1FA86 ; Common
0x1FA87, # .. 0x1FA8F ; Unknown
- 0x1FA90, # .. 0x1FAA8 ; Common
- 0x1FAA9, # .. 0x1FAAF ; Unknown
- 0x1FAB0, # .. 0x1FAB6 ; Common
- 0x1FAB7, # .. 0x1FABF ; Unknown
- 0x1FAC0, # .. 0x1FAC2 ; Common
- 0x1FAC3, # .. 0x1FACF ; Unknown
- 0x1FAD0, # .. 0x1FAD6 ; Common
- 0x1FAD7, # .. 0x1FAFF ; Unknown
+ 0x1FA90, # .. 0x1FAAC ; Common
+ 0x1FAAD, # .. 0x1FAAF ; Unknown
+ 0x1FAB0, # .. 0x1FABA ; Common
+ 0x1FABB, # .. 0x1FABF ; Unknown
+ 0x1FAC0, # .. 0x1FAC5 ; Common
+ 0x1FAC6, # .. 0x1FACF ; Unknown
+ 0x1FAD0, # .. 0x1FAD9 ; Common
+ 0x1FADA, # .. 0x1FADF ; Unknown
+ 0x1FAE0, # .. 0x1FAE7 ; Common
+ 0x1FAE8, # .. 0x1FAEF ; Unknown
+ 0x1FAF0, # .. 0x1FAF6 ; Common
+ 0x1FAF7, # .. 0x1FAFF ; Unknown
0x1FB00, # .. 0x1FB92 ; Common
0x1FB93, # .. 0x1FB93 ; Unknown
0x1FB94, # .. 0x1FBCA ; Common
0x1FBCB, # .. 0x1FBEF ; Unknown
0x1FBF0, # .. 0x1FBF9 ; Common
0x1FBFA, # .. 0x1FFFF ; Unknown
- 0x20000, # .. 0x2A6DD ; Han
- 0x2A6DE, # .. 0x2A6FF ; Unknown
- 0x2A700, # .. 0x2B734 ; Han
- 0x2B735, # .. 0x2B73F ; Unknown
+ 0x20000, # .. 0x2A6DF ; Han
+ 0x2A6E0, # .. 0x2A6FF ; Unknown
+ 0x2A700, # .. 0x2B738 ; Han
+ 0x2B739, # .. 0x2B73F ; Unknown
0x2B740, # .. 0x2B81D ; Han
0x2B81E, # .. 0x2B81F ; Unknown
0x2B820, # .. 0x2CEA1 ; Han
@@ -1674,9 +1725,7 @@ VALUES = [
'Zyyy', # 060C..060C ; Common
'Arab', # 060D..061A ; Arabic
'Zyyy', # 061B..061B ; Common
- 'Arab', # 061C..061C ; Arabic
- 'Zzzz', # 061D..061D ; Unknown
- 'Arab', # 061E..061E ; Arabic
+ 'Arab', # 061C..061E ; Arabic
'Zyyy', # 061F..061F ; Common
'Arab', # 0620..063F ; Arabic
'Zyyy', # 0640..0640 ; Common
@@ -1707,12 +1756,12 @@ VALUES = [
'Mand', # 085E..085E ; Mandaic
'Zzzz', # 085F..085F ; Unknown
'Syrc', # 0860..086A ; Syriac
- 'Zzzz', # 086B..089F ; Unknown
- 'Arab', # 08A0..08B4 ; Arabic
- 'Zzzz', # 08B5..08B5 ; Unknown
- 'Arab', # 08B6..08C7 ; Arabic
- 'Zzzz', # 08C8..08D2 ; Unknown
- 'Arab', # 08D3..08E1 ; Arabic
+ 'Zzzz', # 086B..086F ; Unknown
+ 'Arab', # 0870..088E ; Arabic
+ 'Zzzz', # 088F..088F ; Unknown
+ 'Arab', # 0890..0891 ; Arabic
+ 'Zzzz', # 0892..0897 ; Unknown
+ 'Arab', # 0898..08E1 ; Arabic
'Zyyy', # 08E2..08E2 ; Common
'Arab', # 08E3..08FF ; Arabic
'Deva', # 0900..0950 ; Devanagari
@@ -1875,8 +1924,8 @@ VALUES = [
'Telu', # 0C12..0C28 ; Telugu
'Zzzz', # 0C29..0C29 ; Unknown
'Telu', # 0C2A..0C39 ; Telugu
- 'Zzzz', # 0C3A..0C3C ; Unknown
- 'Telu', # 0C3D..0C44 ; Telugu
+ 'Zzzz', # 0C3A..0C3B ; Unknown
+ 'Telu', # 0C3C..0C44 ; Telugu
'Zzzz', # 0C45..0C45 ; Unknown
'Telu', # 0C46..0C48 ; Telugu
'Zzzz', # 0C49..0C49 ; Unknown
@@ -1885,7 +1934,9 @@ VALUES = [
'Telu', # 0C55..0C56 ; Telugu
'Zzzz', # 0C57..0C57 ; Unknown
'Telu', # 0C58..0C5A ; Telugu
- 'Zzzz', # 0C5B..0C5F ; Unknown
+ 'Zzzz', # 0C5B..0C5C ; Unknown
+ 'Telu', # 0C5D..0C5D ; Telugu
+ 'Zzzz', # 0C5E..0C5F ; Unknown
'Telu', # 0C60..0C63 ; Telugu
'Zzzz', # 0C64..0C65 ; Unknown
'Telu', # 0C66..0C6F ; Telugu
@@ -1908,8 +1959,8 @@ VALUES = [
'Knda', # 0CCA..0CCD ; Kannada
'Zzzz', # 0CCE..0CD4 ; Unknown
'Knda', # 0CD5..0CD6 ; Kannada
- 'Zzzz', # 0CD7..0CDD ; Unknown
- 'Knda', # 0CDE..0CDE ; Kannada
+ 'Zzzz', # 0CD7..0CDC ; Unknown
+ 'Knda', # 0CDD..0CDE ; Kannada
'Zzzz', # 0CDF..0CDF ; Unknown
'Knda', # 0CE0..0CE3 ; Kannada
'Zzzz', # 0CE4..0CE5 ; Unknown
@@ -2054,10 +2105,9 @@ VALUES = [
'Zyyy', # 16EB..16ED ; Common
'Runr', # 16EE..16F8 ; Runic
'Zzzz', # 16F9..16FF ; Unknown
- 'Tglg', # 1700..170C ; Tagalog
- 'Zzzz', # 170D..170D ; Unknown
- 'Tglg', # 170E..1714 ; Tagalog
- 'Zzzz', # 1715..171F ; Unknown
+ 'Tglg', # 1700..1715 ; Tagalog
+ 'Zzzz', # 1716..171E ; Unknown
+ 'Tglg', # 171F..171F ; Tagalog
'Hano', # 1720..1734 ; Hanunoo
'Zyyy', # 1735..1736 ; Common
'Zzzz', # 1737..173F ; Unknown
@@ -2079,9 +2129,7 @@ VALUES = [
'Zyyy', # 1802..1803 ; Common
'Mong', # 1804..1804 ; Mongolian
'Zyyy', # 1805..1805 ; Common
- 'Mong', # 1806..180E ; Mongolian
- 'Zzzz', # 180F..180F ; Unknown
- 'Mong', # 1810..1819 ; Mongolian
+ 'Mong', # 1806..1819 ; Mongolian
'Zzzz', # 181A..181F ; Unknown
'Mong', # 1820..1878 ; Mongolian
'Zzzz', # 1879..187F ; Unknown
@@ -2123,12 +2171,12 @@ VALUES = [
'Zzzz', # 1A9A..1A9F ; Unknown
'Lana', # 1AA0..1AAD ; Tai_Tham
'Zzzz', # 1AAE..1AAF ; Unknown
- 'Zinh', # 1AB0..1AC0 ; Inherited
- 'Zzzz', # 1AC1..1AFF ; Unknown
- 'Bali', # 1B00..1B4B ; Balinese
- 'Zzzz', # 1B4C..1B4F ; Unknown
- 'Bali', # 1B50..1B7C ; Balinese
- 'Zzzz', # 1B7D..1B7F ; Unknown
+ 'Zinh', # 1AB0..1ACE ; Inherited
+ 'Zzzz', # 1ACF..1AFF ; Unknown
+ 'Bali', # 1B00..1B4C ; Balinese
+ 'Zzzz', # 1B4D..1B4F ; Unknown
+ 'Bali', # 1B50..1B7E ; Balinese
+ 'Zzzz', # 1B7F..1B7F ; Unknown
'Sund', # 1B80..1BBF ; Sundanese
'Batk', # 1BC0..1BF3 ; Batak
'Zzzz', # 1BF4..1BFB ; Unknown
@@ -2170,9 +2218,7 @@ VALUES = [
'Cyrl', # 1D78..1D78 ; Cyrillic
'Latn', # 1D79..1DBE ; Latin
'Grek', # 1DBF..1DBF ; Greek
- 'Zinh', # 1DC0..1DF9 ; Inherited
- 'Zzzz', # 1DFA..1DFA ; Unknown
- 'Zinh', # 1DFB..1DFF ; Inherited
+ 'Zinh', # 1DC0..1DFF ; Inherited
'Latn', # 1E00..1EFF ; Latin
'Grek', # 1F00..1F15 ; Greek
'Zzzz', # 1F16..1F17 ; Unknown
@@ -2219,8 +2265,8 @@ VALUES = [
'Zzzz', # 208F..208F ; Unknown
'Latn', # 2090..209C ; Latin
'Zzzz', # 209D..209F ; Unknown
- 'Zyyy', # 20A0..20BF ; Common
- 'Zzzz', # 20C0..20CF ; Unknown
+ 'Zyyy', # 20A0..20C0 ; Common
+ 'Zzzz', # 20C1..20CF ; Unknown
'Zinh', # 20D0..20F0 ; Inherited
'Zzzz', # 20F1..20FF ; Unknown
'Zyyy', # 2100..2125 ; Common
@@ -2246,10 +2292,7 @@ VALUES = [
'Zyyy', # 2B76..2B95 ; Common
'Zzzz', # 2B96..2B96 ; Unknown
'Zyyy', # 2B97..2BFF ; Common
- 'Glag', # 2C00..2C2E ; Glagolitic
- 'Zzzz', # 2C2F..2C2F ; Unknown
- 'Glag', # 2C30..2C5E ; Glagolitic
- 'Zzzz', # 2C5F..2C5F ; Unknown
+ 'Glag', # 2C00..2C5F ; Glagolitic
'Latn', # 2C60..2C7F ; Latin
'Copt', # 2C80..2CF3 ; Coptic
'Zzzz', # 2CF4..2CF8 ; Unknown
@@ -2284,8 +2327,8 @@ VALUES = [
'Ethi', # 2DD8..2DDE ; Ethiopic
'Zzzz', # 2DDF..2DDF ; Unknown
'Cyrl', # 2DE0..2DFF ; Cyrillic
- 'Zyyy', # 2E00..2E52 ; Common
- 'Zzzz', # 2E53..2E7F ; Unknown
+ 'Zyyy', # 2E00..2E5D ; Common
+ 'Zzzz', # 2E5E..2E7F ; Unknown
'Hani', # 2E80..2E99 ; Han
'Zzzz', # 2E9A..2E9A ; Unknown
'Hani', # 2E9B..2EF3 ; Han
@@ -2336,8 +2379,7 @@ VALUES = [
'Zyyy', # 3358..33FF ; Common
'Hani', # 3400..4DBF ; Han
'Zyyy', # 4DC0..4DFF ; Common
- 'Hani', # 4E00..9FFC ; Han
- 'Zzzz', # 9FFD..9FFF ; Unknown
+ 'Hani', # 4E00..9FFF ; Han
'Yiii', # A000..A48C ; Yi
'Zzzz', # A48D..A48F ; Unknown
'Yiii', # A490..A4C6 ; Yi
@@ -2351,11 +2393,15 @@ VALUES = [
'Zyyy', # A700..A721 ; Common
'Latn', # A722..A787 ; Latin
'Zyyy', # A788..A78A ; Common
- 'Latn', # A78B..A7BF ; Latin
- 'Zzzz', # A7C0..A7C1 ; Unknown
- 'Latn', # A7C2..A7CA ; Latin
- 'Zzzz', # A7CB..A7F4 ; Unknown
- 'Latn', # A7F5..A7FF ; Latin
+ 'Latn', # A78B..A7CA ; Latin
+ 'Zzzz', # A7CB..A7CF ; Unknown
+ 'Latn', # A7D0..A7D1 ; Latin
+ 'Zzzz', # A7D2..A7D2 ; Unknown
+ 'Latn', # A7D3..A7D3 ; Latin
+ 'Zzzz', # A7D4..A7D4 ; Unknown
+ 'Latn', # A7D5..A7D9 ; Latin
+ 'Zzzz', # A7DA..A7F1 ; Unknown
+ 'Latn', # A7F2..A7FF ; Latin
'Sylo', # A800..A82C ; Syloti_Nagri
'Zzzz', # A82D..A82F ; Unknown
'Zyyy', # A830..A839 ; Common
@@ -2443,17 +2489,17 @@ VALUES = [
'Hebr', # FB43..FB44 ; Hebrew
'Zzzz', # FB45..FB45 ; Unknown
'Hebr', # FB46..FB4F ; Hebrew
- 'Arab', # FB50..FBC1 ; Arabic
- 'Zzzz', # FBC2..FBD2 ; Unknown
+ 'Arab', # FB50..FBC2 ; Arabic
+ 'Zzzz', # FBC3..FBD2 ; Unknown
'Arab', # FBD3..FD3D ; Arabic
'Zyyy', # FD3E..FD3F ; Common
- 'Zzzz', # FD40..FD4F ; Unknown
- 'Arab', # FD50..FD8F ; Arabic
+ 'Arab', # FD40..FD8F ; Arabic
'Zzzz', # FD90..FD91 ; Unknown
'Arab', # FD92..FDC7 ; Arabic
- 'Zzzz', # FDC8..FDEF ; Unknown
- 'Arab', # FDF0..FDFD ; Arabic
- 'Zzzz', # FDFE..FDFF ; Unknown
+ 'Zzzz', # FDC8..FDCE ; Unknown
+ 'Arab', # FDCF..FDCF ; Arabic
+ 'Zzzz', # FDD0..FDEF ; Unknown
+ 'Arab', # FDF0..FDFF ; Arabic
'Zinh', # FE00..FE0F ; Inherited
'Zyyy', # FE10..FE19 ; Common
'Zzzz', # FE1A..FE1F ; Unknown
@@ -2560,13 +2606,34 @@ VALUES = [
'Aghb', # 10530..10563 ; Caucasian_Albanian
'Zzzz', # 10564..1056E ; Unknown
'Aghb', # 1056F..1056F ; Caucasian_Albanian
- 'Zzzz', # 10570..105FF ; Unknown
+ 'Vith', # 10570..1057A ; Vithkuqi
+ 'Zzzz', # 1057B..1057B ; Unknown
+ 'Vith', # 1057C..1058A ; Vithkuqi
+ 'Zzzz', # 1058B..1058B ; Unknown
+ 'Vith', # 1058C..10592 ; Vithkuqi
+ 'Zzzz', # 10593..10593 ; Unknown
+ 'Vith', # 10594..10595 ; Vithkuqi
+ 'Zzzz', # 10596..10596 ; Unknown
+ 'Vith', # 10597..105A1 ; Vithkuqi
+ 'Zzzz', # 105A2..105A2 ; Unknown
+ 'Vith', # 105A3..105B1 ; Vithkuqi
+ 'Zzzz', # 105B2..105B2 ; Unknown
+ 'Vith', # 105B3..105B9 ; Vithkuqi
+ 'Zzzz', # 105BA..105BA ; Unknown
+ 'Vith', # 105BB..105BC ; Vithkuqi
+ 'Zzzz', # 105BD..105FF ; Unknown
'Lina', # 10600..10736 ; Linear_A
'Zzzz', # 10737..1073F ; Unknown
'Lina', # 10740..10755 ; Linear_A
'Zzzz', # 10756..1075F ; Unknown
'Lina', # 10760..10767 ; Linear_A
- 'Zzzz', # 10768..107FF ; Unknown
+ 'Zzzz', # 10768..1077F ; Unknown
+ 'Latn', # 10780..10785 ; Latin
+ 'Zzzz', # 10786..10786 ; Unknown
+ 'Latn', # 10787..107B0 ; Latin
+ 'Zzzz', # 107B1..107B1 ; Unknown
+ 'Latn', # 107B2..107BA ; Latin
+ 'Zzzz', # 107BB..107FF ; Unknown
'Cprt', # 10800..10805 ; Cypriot
'Zzzz', # 10806..10807 ; Unknown
'Cprt', # 10808..10808 ; Cypriot
@@ -2664,18 +2731,20 @@ VALUES = [
'Sogo', # 10F00..10F27 ; Old_Sogdian
'Zzzz', # 10F28..10F2F ; Unknown
'Sogd', # 10F30..10F59 ; Sogdian
- 'Zzzz', # 10F5A..10FAF ; Unknown
+ 'Zzzz', # 10F5A..10F6F ; Unknown
+ 'Ougr', # 10F70..10F89 ; Old_Uyghur
+ 'Zzzz', # 10F8A..10FAF ; Unknown
'Chrs', # 10FB0..10FCB ; Chorasmian
'Zzzz', # 10FCC..10FDF ; Unknown
'Elym', # 10FE0..10FF6 ; Elymaic
'Zzzz', # 10FF7..10FFF ; Unknown
'Brah', # 11000..1104D ; Brahmi
'Zzzz', # 1104E..11051 ; Unknown
- 'Brah', # 11052..1106F ; Brahmi
- 'Zzzz', # 11070..1107E ; Unknown
+ 'Brah', # 11052..11075 ; Brahmi
+ 'Zzzz', # 11076..1107E ; Unknown
'Brah', # 1107F..1107F ; Brahmi
- 'Kthi', # 11080..110C1 ; Kaithi
- 'Zzzz', # 110C2..110CC ; Unknown
+ 'Kthi', # 11080..110C2 ; Kaithi
+ 'Zzzz', # 110C3..110CC ; Unknown
'Kthi', # 110CD..110CD ; Kaithi
'Zzzz', # 110CE..110CF ; Unknown
'Sora', # 110D0..110E8 ; Sora_Sompeng
@@ -2759,16 +2828,16 @@ VALUES = [
'Zzzz', # 1165A..1165F ; Unknown
'Mong', # 11660..1166C ; Mongolian
'Zzzz', # 1166D..1167F ; Unknown
- 'Takr', # 11680..116B8 ; Takri
- 'Zzzz', # 116B9..116BF ; Unknown
+ 'Takr', # 11680..116B9 ; Takri
+ 'Zzzz', # 116BA..116BF ; Unknown
'Takr', # 116C0..116C9 ; Takri
'Zzzz', # 116CA..116FF ; Unknown
'Ahom', # 11700..1171A ; Ahom
'Zzzz', # 1171B..1171C ; Unknown
'Ahom', # 1171D..1172B ; Ahom
'Zzzz', # 1172C..1172F ; Unknown
- 'Ahom', # 11730..1173F ; Ahom
- 'Zzzz', # 11740..117FF ; Unknown
+ 'Ahom', # 11730..11746 ; Ahom
+ 'Zzzz', # 11747..117FF ; Unknown
'Dogr', # 11800..1183B ; Dogra
'Zzzz', # 1183C..1189F ; Unknown
'Wara', # 118A0..118F2 ; Warang_Citi
@@ -2799,7 +2868,8 @@ VALUES = [
'Zanb', # 11A00..11A47 ; Zanabazar_Square
'Zzzz', # 11A48..11A4F ; Unknown
'Soyo', # 11A50..11AA2 ; Soyombo
- 'Zzzz', # 11AA3..11ABF ; Unknown
+ 'Zzzz', # 11AA3..11AAF ; Unknown
+ 'Cans', # 11AB0..11ABF ; Canadian_Aboriginal
'Pauc', # 11AC0..11AF8 ; Pau_Cin_Hau
'Zzzz', # 11AF9..11BFF ; Unknown
'Bhks', # 11C00..11C08 ; Bhaiksuki
@@ -2856,7 +2926,9 @@ VALUES = [
'Xsux', # 12470..12474 ; Cuneiform
'Zzzz', # 12475..1247F ; Unknown
'Xsux', # 12480..12543 ; Cuneiform
- 'Zzzz', # 12544..12FFF ; Unknown
+ 'Zzzz', # 12544..12F8F ; Unknown
+ 'Cpmn', # 12F90..12FF2 ; Cypro_Minoan
+ 'Zzzz', # 12FF3..12FFF ; Unknown
'Egyp', # 13000..1342E ; Egyptian_Hieroglyphs
'Zzzz', # 1342F..1342F ; Unknown
'Egyp', # 13430..13438 ; Egyptian_Hieroglyphs
@@ -2870,7 +2942,10 @@ VALUES = [
'Mroo', # 16A60..16A69 ; Mro
'Zzzz', # 16A6A..16A6D ; Unknown
'Mroo', # 16A6E..16A6F ; Mro
- 'Zzzz', # 16A70..16ACF ; Unknown
+ 'Tnsa', # 16A70..16ABE ; Tangsa
+ 'Zzzz', # 16ABF..16ABF ; Unknown
+ 'Tnsa', # 16AC0..16AC9 ; Tangsa
+ 'Zzzz', # 16ACA..16ACF ; Unknown
'Bass', # 16AD0..16AED ; Bassa_Vah
'Zzzz', # 16AEE..16AEF ; Unknown
'Bass', # 16AF0..16AF5 ; Bassa_Vah
@@ -2895,7 +2970,7 @@ VALUES = [
'Zzzz', # 16FA0..16FDF ; Unknown
'Tang', # 16FE0..16FE0 ; Tangut
'Nshu', # 16FE1..16FE1 ; Nushu
- 'Zyyy', # 16FE2..16FE3 ; Common
+ 'Hani', # 16FE2..16FE3 ; Han
'Kits', # 16FE4..16FE4 ; Khitan_Small_Script
'Zzzz', # 16FE5..16FEF ; Unknown
'Hani', # 16FF0..16FF1 ; Han
@@ -2906,10 +2981,17 @@ VALUES = [
'Kits', # 18B00..18CD5 ; Khitan_Small_Script
'Zzzz', # 18CD6..18CFF ; Unknown
'Tang', # 18D00..18D08 ; Tangut
- 'Zzzz', # 18D09..1AFFF ; Unknown
+ 'Zzzz', # 18D09..1AFEF ; Unknown
+ 'Kana', # 1AFF0..1AFF3 ; Katakana
+ 'Zzzz', # 1AFF4..1AFF4 ; Unknown
+ 'Kana', # 1AFF5..1AFFB ; Katakana
+ 'Zzzz', # 1AFFC..1AFFC ; Unknown
+ 'Kana', # 1AFFD..1AFFE ; Katakana
+ 'Zzzz', # 1AFFF..1AFFF ; Unknown
'Kana', # 1B000..1B000 ; Katakana
- 'Hira', # 1B001..1B11E ; Hiragana
- 'Zzzz', # 1B11F..1B14F ; Unknown
+ 'Hira', # 1B001..1B11F ; Hiragana
+ 'Kana', # 1B120..1B122 ; Katakana
+ 'Zzzz', # 1B123..1B14F ; Unknown
'Hira', # 1B150..1B152 ; Hiragana
'Zzzz', # 1B153..1B163 ; Unknown
'Kana', # 1B164..1B167 ; Katakana
@@ -2926,7 +3008,13 @@ VALUES = [
'Zzzz', # 1BC9A..1BC9B ; Unknown
'Dupl', # 1BC9C..1BC9F ; Duployan
'Zyyy', # 1BCA0..1BCA3 ; Common
- 'Zzzz', # 1BCA4..1CFFF ; Unknown
+ 'Zzzz', # 1BCA4..1CEFF ; Unknown
+ 'Zinh', # 1CF00..1CF2D ; Inherited
+ 'Zzzz', # 1CF2E..1CF2F ; Unknown
+ 'Zinh', # 1CF30..1CF46 ; Inherited
+ 'Zzzz', # 1CF47..1CF4F ; Unknown
+ 'Zyyy', # 1CF50..1CFC3 ; Common
+ 'Zzzz', # 1CFC4..1CFFF ; Unknown
'Zyyy', # 1D000..1D0F5 ; Common
'Zzzz', # 1D0F6..1D0FF ; Unknown
'Zyyy', # 1D100..1D126 ; Common
@@ -2939,8 +3027,8 @@ VALUES = [
'Zinh', # 1D185..1D18B ; Inherited
'Zyyy', # 1D18C..1D1A9 ; Common
'Zinh', # 1D1AA..1D1AD ; Inherited
- 'Zyyy', # 1D1AE..1D1E8 ; Common
- 'Zzzz', # 1D1E9..1D1FF ; Unknown
+ 'Zyyy', # 1D1AE..1D1EA ; Common
+ 'Zzzz', # 1D1EB..1D1FF ; Unknown
'Grek', # 1D200..1D245 ; Greek
'Zzzz', # 1D246..1D2DF ; Unknown
'Zyyy', # 1D2E0..1D2F3 ; Common
@@ -2995,7 +3083,9 @@ VALUES = [
'Sgnw', # 1DA9B..1DA9F ; SignWriting
'Zzzz', # 1DAA0..1DAA0 ; Unknown
'Sgnw', # 1DAA1..1DAAF ; SignWriting
- 'Zzzz', # 1DAB0..1DFFF ; Unknown
+ 'Zzzz', # 1DAB0..1DEFF ; Unknown
+ 'Latn', # 1DF00..1DF1E ; Latin
+ 'Zzzz', # 1DF1F..1DFFF ; Unknown
'Glag', # 1E000..1E006 ; Glagolitic
'Zzzz', # 1E007..1E007 ; Unknown
'Glag', # 1E008..1E018 ; Glagolitic
@@ -3013,11 +3103,21 @@ VALUES = [
'Hmnp', # 1E140..1E149 ; Nyiakeng_Puachue_Hmong
'Zzzz', # 1E14A..1E14D ; Unknown
'Hmnp', # 1E14E..1E14F ; Nyiakeng_Puachue_Hmong
- 'Zzzz', # 1E150..1E2BF ; Unknown
+ 'Zzzz', # 1E150..1E28F ; Unknown
+ 'Toto', # 1E290..1E2AE ; Toto
+ 'Zzzz', # 1E2AF..1E2BF ; Unknown
'Wcho', # 1E2C0..1E2F9 ; Wancho
'Zzzz', # 1E2FA..1E2FE ; Unknown
'Wcho', # 1E2FF..1E2FF ; Wancho
- 'Zzzz', # 1E300..1E7FF ; Unknown
+ 'Zzzz', # 1E300..1E7DF ; Unknown
+ 'Ethi', # 1E7E0..1E7E6 ; Ethiopic
+ 'Zzzz', # 1E7E7..1E7E7 ; Unknown
+ 'Ethi', # 1E7E8..1E7EB ; Ethiopic
+ 'Zzzz', # 1E7EC..1E7EC ; Unknown
+ 'Ethi', # 1E7ED..1E7EE ; Ethiopic
+ 'Zzzz', # 1E7EF..1E7EF ; Unknown
+ 'Ethi', # 1E7F0..1E7FE ; Ethiopic
+ 'Zzzz', # 1E7FF..1E7FF ; Unknown
'Mend', # 1E800..1E8C4 ; Mende_Kikakui
'Zzzz', # 1E8C5..1E8C6 ; Unknown
'Mend', # 1E8C7..1E8D6 ; Mende_Kikakui
@@ -3127,8 +3227,8 @@ VALUES = [
'Zyyy', # 1F260..1F265 ; Common
'Zzzz', # 1F266..1F2FF ; Unknown
'Zyyy', # 1F300..1F6D7 ; Common
- 'Zzzz', # 1F6D8..1F6DF ; Unknown
- 'Zyyy', # 1F6E0..1F6EC ; Common
+ 'Zzzz', # 1F6D8..1F6DC ; Unknown
+ 'Zyyy', # 1F6DD..1F6EC ; Common
'Zzzz', # 1F6ED..1F6EF ; Unknown
'Zyyy', # 1F6F0..1F6FC ; Common
'Zzzz', # 1F6FD..1F6FF ; Unknown
@@ -3137,7 +3237,9 @@ VALUES = [
'Zyyy', # 1F780..1F7D8 ; Common
'Zzzz', # 1F7D9..1F7DF ; Unknown
'Zyyy', # 1F7E0..1F7EB ; Common
- 'Zzzz', # 1F7EC..1F7FF ; Unknown
+ 'Zzzz', # 1F7EC..1F7EF ; Unknown
+ 'Zyyy', # 1F7F0..1F7F0 ; Common
+ 'Zzzz', # 1F7F1..1F7FF ; Unknown
'Zyyy', # 1F800..1F80B ; Common
'Zzzz', # 1F80C..1F80F ; Unknown
'Zyyy', # 1F810..1F847 ; Common
@@ -3150,38 +3252,38 @@ VALUES = [
'Zzzz', # 1F8AE..1F8AF ; Unknown
'Zyyy', # 1F8B0..1F8B1 ; Common
'Zzzz', # 1F8B2..1F8FF ; Unknown
- 'Zyyy', # 1F900..1F978 ; Common
- 'Zzzz', # 1F979..1F979 ; Unknown
- 'Zyyy', # 1F97A..1F9CB ; Common
- 'Zzzz', # 1F9CC..1F9CC ; Unknown
- 'Zyyy', # 1F9CD..1FA53 ; Common
+ 'Zyyy', # 1F900..1FA53 ; Common
'Zzzz', # 1FA54..1FA5F ; Unknown
'Zyyy', # 1FA60..1FA6D ; Common
'Zzzz', # 1FA6E..1FA6F ; Unknown
'Zyyy', # 1FA70..1FA74 ; Common
'Zzzz', # 1FA75..1FA77 ; Unknown
- 'Zyyy', # 1FA78..1FA7A ; Common
- 'Zzzz', # 1FA7B..1FA7F ; Unknown
+ 'Zyyy', # 1FA78..1FA7C ; Common
+ 'Zzzz', # 1FA7D..1FA7F ; Unknown
'Zyyy', # 1FA80..1FA86 ; Common
'Zzzz', # 1FA87..1FA8F ; Unknown
- 'Zyyy', # 1FA90..1FAA8 ; Common
- 'Zzzz', # 1FAA9..1FAAF ; Unknown
- 'Zyyy', # 1FAB0..1FAB6 ; Common
- 'Zzzz', # 1FAB7..1FABF ; Unknown
- 'Zyyy', # 1FAC0..1FAC2 ; Common
- 'Zzzz', # 1FAC3..1FACF ; Unknown
- 'Zyyy', # 1FAD0..1FAD6 ; Common
- 'Zzzz', # 1FAD7..1FAFF ; Unknown
+ 'Zyyy', # 1FA90..1FAAC ; Common
+ 'Zzzz', # 1FAAD..1FAAF ; Unknown
+ 'Zyyy', # 1FAB0..1FABA ; Common
+ 'Zzzz', # 1FABB..1FABF ; Unknown
+ 'Zyyy', # 1FAC0..1FAC5 ; Common
+ 'Zzzz', # 1FAC6..1FACF ; Unknown
+ 'Zyyy', # 1FAD0..1FAD9 ; Common
+ 'Zzzz', # 1FADA..1FADF ; Unknown
+ 'Zyyy', # 1FAE0..1FAE7 ; Common
+ 'Zzzz', # 1FAE8..1FAEF ; Unknown
+ 'Zyyy', # 1FAF0..1FAF6 ; Common
+ 'Zzzz', # 1FAF7..1FAFF ; Unknown
'Zyyy', # 1FB00..1FB92 ; Common
'Zzzz', # 1FB93..1FB93 ; Unknown
'Zyyy', # 1FB94..1FBCA ; Common
'Zzzz', # 1FBCB..1FBEF ; Unknown
'Zyyy', # 1FBF0..1FBF9 ; Common
'Zzzz', # 1FBFA..1FFFF ; Unknown
- 'Hani', # 20000..2A6DD ; Han
- 'Zzzz', # 2A6DE..2A6FF ; Unknown
- 'Hani', # 2A700..2B734 ; Han
- 'Zzzz', # 2B735..2B73F ; Unknown
+ 'Hani', # 20000..2A6DF ; Han
+ 'Zzzz', # 2A6E0..2A6FF ; Unknown
+ 'Hani', # 2A700..2B738 ; Han
+ 'Zzzz', # 2B739..2B73F ; Unknown
'Hani', # 2B740..2B81D ; Han
'Zzzz', # 2B81E..2B81F ; Unknown
'Hani', # 2B820..2CEA1 ; Han
@@ -3226,6 +3328,7 @@ NAMES = {
'Cher': 'Cherokee',
'Chrs': 'Chorasmian',
'Copt': 'Coptic',
+ 'Cpmn': 'Cypro_Minoan',
'Cprt': 'Cypriot',
'Cyrl': 'Cyrillic',
'Deva': 'Devanagari',
@@ -3305,6 +3408,7 @@ NAMES = {
'Orya': 'Oriya',
'Osge': 'Osage',
'Osma': 'Osmanya',
+ 'Ougr': 'Old_Uyghur',
'Palm': 'Palmyrene',
'Pauc': 'Pau_Cin_Hau',
'Perm': 'Old_Permic',
@@ -3347,8 +3451,11 @@ NAMES = {
'Thai': 'Thai',
'Tibt': 'Tibetan',
'Tirh': 'Tirhuta',
+ 'Tnsa': 'Tangsa',
+ 'Toto': 'Toto',
'Ugar': 'Ugaritic',
'Vaii': 'Vai',
+ 'Vith': 'Vithkuqi',
'Wara': 'Warang_Citi',
'Wcho': 'Wancho',
'Xpeo': 'Old_Persian',
diff --git a/Lib/fontTools/unicodedata/__init__.py b/Lib/fontTools/unicodedata/__init__.py
index 8845b829..4546ef3f 100644
--- a/Lib/fontTools/unicodedata/__init__.py
+++ b/Lib/fontTools/unicodedata/__init__.py
@@ -1,11 +1,11 @@
-from fontTools.misc.py23 import byteord, tostr
+from fontTools.misc.textTools import byteord, tostr
import re
from bisect import bisect_right
try:
# use unicodedata backport compatible with python2:
- # https://github.com/mikekap/unicodedata2
+ # https://github.com/fonttools/unicodedata2
from unicodedata2 import *
except ImportError: # pragma: no cover
# fall back to built-in unicodedata (possibly outdated)
@@ -73,7 +73,7 @@ def script_extension(char):
>>> script_extension("a") == {'Latn'}
True
- >>> script_extension(chr(0x060C)) == {'Rohg', 'Syrc', 'Yezi', 'Arab', 'Thaa'}
+ >>> script_extension(chr(0x060C)) == {'Rohg', 'Syrc', 'Yezi', 'Arab', 'Thaa', 'Nkoo'}
True
>>> script_extension(chr(0x10FFFF)) == {'Zzzz'}
True
@@ -134,8 +134,10 @@ def script_code(script_name, default=KeyError):
return default
-# The data on script direction is taken from CLDR 37:
-# https://github.com/unicode-org/cldr/blob/release-37/common/properties/scriptMetadata.txt
+# The data on script direction is taken from Harfbuzz source code:
+# https://github.com/harfbuzz/harfbuzz/blob/3.2.0/src/hb-common.cc#L514-L613
+# This in turn references the following "Script_Metadata" document:
+# https://docs.google.com/spreadsheets/d/1Y90M0Ie3MUJ6UVCRDOypOtijlMDLNNyyLk36T6iMu0o
RTL_SCRIPTS = {
# Unicode-1.1 additions
'Arab', # Arabic
@@ -200,6 +202,9 @@ RTL_SCRIPTS = {
# Unicode-13.0 additions
'Chrs', # Chorasmian
'Yezi', # Yezidi
+
+ # Unicode-14.0 additions
+ 'Ougr', # Old Uyghur
}
def script_horizontal_direction(script_code, default=KeyError):
@@ -259,6 +264,9 @@ def ot_tag_to_script(tag):
if not tag or " " in tag or len(tag) > 4:
raise ValueError("invalid OpenType tag: %r" % tag)
+ if tag in OTTags.SCRIPT_ALIASES:
+ tag = OTTags.SCRIPT_ALIASES[tag]
+
while len(tag) != 4:
tag += str(" ") # pad with spaces
diff --git a/Lib/fontTools/varLib/__init__.py b/Lib/fontTools/varLib/__init__.py
index 36ff0d97..15c2e700 100644
--- a/Lib/fontTools/varLib/__init__.py
+++ b/Lib/fontTools/varLib/__init__.py
@@ -18,9 +18,9 @@ Then you can make a variable-font this way:
API *will* change in near future.
"""
-from fontTools.misc.py23 import Tag, tostr
-from fontTools.misc.roundTools import noRound, otRound
from fontTools.misc.vector import Vector
+from fontTools.misc.roundTools import noRound, otRound
+from fontTools.misc.textTools import Tag, tostr
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
@@ -212,6 +212,7 @@ def _add_stat(font, axes):
axes = [dict(tag=a.axisTag, name=a.axisNameID) for a in fvarTable.axes]
buildStatTable(font, axes)
+_MasterData = namedtuple('_MasterData', ['glyf', 'hMetrics', 'vMetrics'])
def _add_gvar(font, masterModel, master_ttfs, tolerance=0.5, optimize=True):
if tolerance < 0:
@@ -223,15 +224,18 @@ def _add_gvar(font, masterModel, master_ttfs, tolerance=0.5, optimize=True):
glyf = font['glyf']
defaultMasterIndex = masterModel.reverseMapping[0]
- # use hhea.ascent of base master as default vertical origin when vmtx is missing
- baseAscent = font['hhea'].ascent
- for glyph in font.getGlyphOrder():
+ master_datas = [_MasterData(m['glyf'],
+ m['hmtx'].metrics,
+ getattr(m.get('vmtx'), 'metrics', None))
+ for m in master_ttfs]
+ for glyph in font.getGlyphOrder():
+ log.debug("building gvar for glyph '%s'", glyph)
isComposite = glyf[glyph].isComposite()
allData = [
- m["glyf"].getCoordinatesAndControls(glyph, m, defaultVerticalOrigin=baseAscent)
- for m in master_ttfs
+ m.glyf._getCoordinatesAndControls(glyph, m.hMetrics, m.vMetrics)
+ for m in master_datas
]
if allData[defaultMasterIndex][1].numberOfContours != 0:
@@ -284,9 +288,9 @@ def _add_gvar(font, masterModel, master_ttfs, tolerance=0.5, optimize=True):
var_opt = TupleVariation(support, delta_opt)
axis_tags = sorted(support.keys()) # Shouldn't matter that this is different from fvar...?
- tupleData, auxData, _ = var.compile(axis_tags, [], None)
+ tupleData, auxData = var.compile(axis_tags)
unoptimized_len = len(tupleData) + len(auxData)
- tupleData, auxData, _ = var_opt.compile(axis_tags, [], None)
+ tupleData, auxData = var_opt.compile(axis_tags)
optimized_len = len(tupleData) + len(auxData)
if optimized_len < unoptimized_len:
@@ -299,9 +303,10 @@ def _remove_TTHinting(font):
for tag in ("cvar", "cvt ", "fpgm", "prep"):
if tag in font:
del font[tag]
+ maxp = font['maxp']
for attr in ("maxTwilightPoints", "maxStorage", "maxFunctionDefs", "maxInstructionDefs", "maxStackElements", "maxSizeOfInstructions"):
- setattr(font["maxp"], attr, 0)
- font["maxp"].maxZones = 1
+ setattr(maxp, attr, 0)
+ maxp.maxZones = 1
font["glyf"].removeHinting()
# TODO: Modify gasp table to deactivate gridfitting for all ranges?
@@ -316,12 +321,9 @@ def _merge_TTHinting(font, masterModel, master_ttfs):
for tag in ("fpgm", "prep"):
all_pgms = [m[tag].program for m in master_ttfs if tag in m]
- if len(all_pgms) == 0:
+ if not all_pgms:
continue
- if tag in font:
- font_pgm = font[tag].program
- else:
- font_pgm = Program()
+ font_pgm = getattr(font.get(tag), 'program', None)
if any(pgm != font_pgm for pgm in all_pgms):
log.warning("Masters have incompatible %s tables, hinting is discarded." % tag)
_remove_TTHinting(font)
@@ -329,19 +331,17 @@ def _merge_TTHinting(font, masterModel, master_ttfs):
# glyf table
- for name, glyph in font["glyf"].glyphs.items():
+ font_glyf = font['glyf']
+ master_glyfs = [m['glyf'] for m in master_ttfs]
+ for name, glyph in font_glyf.glyphs.items():
all_pgms = [
- m["glyf"][name].program
- for m in master_ttfs
- if name in m['glyf'] and hasattr(m["glyf"][name], "program")
+ getattr(glyf.get(name), 'program', None)
+ for glyf in master_glyfs
]
if not any(all_pgms):
continue
- glyph.expand(font["glyf"])
- if hasattr(glyph, "program"):
- font_pgm = glyph.program
- else:
- font_pgm = Program()
+ glyph.expand(font_glyf)
+ font_pgm = getattr(glyph, 'program', None)
if any(pgm != font_pgm for pgm in all_pgms if pgm):
log.warning("Masters have incompatible glyph programs in glyph '%s', hinting is discarded." % name)
# TODO Only drop hinting from this glyph.
diff --git a/Lib/fontTools/varLib/builder.py b/Lib/fontTools/varLib/builder.py
index 152336b0..60d7172e 100644
--- a/Lib/fontTools/varLib/builder.py
+++ b/Lib/fontTools/varLib/builder.py
@@ -26,39 +26,54 @@ def buildVarRegionList(supports, axisTags):
return self
-def _reorderItem(lst, narrows, zeroes):
- out = []
- count = len(lst)
- for i in range(count):
- if i not in narrows:
- out.append(lst[i])
- for i in range(count):
- if i in narrows and i not in zeroes:
- out.append(lst[i])
- return out
+def _reorderItem(lst, mapping):
+ return [lst[i] for i in mapping]
def VarData_calculateNumShorts(self, optimize=False):
count = self.VarRegionCount
items = self.Item
- narrows = set(range(count))
- zeroes = set(range(count))
+ bit_lengths = [0] * count
for item in items:
- wides = [i for i in narrows if not (-128 <= item[i] <= 127)]
- narrows.difference_update(wides)
- nonzeroes = [i for i in zeroes if item[i]]
- zeroes.difference_update(nonzeroes)
- if not narrows and not zeroes:
- break
+ # The "+ (i < -1)" magic is to handle two's-complement.
+ # That is, we want to get back 7 for -128, whereas
+ # bit_length() returns 8. Similarly for -65536.
+ # The reason "i < -1" is used instead of "i < 0" is that
+ # the latter would make it return 0 for "-1" instead of 1.
+ bl = [(i + (i < -1)).bit_length() for i in item]
+ bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]
+ # The addition of 8, instead of 7, is to account for the sign bit.
+ # This "((b + 8) >> 3) if b else 0" when combined with the above
+ # "(i + (i < -1)).bit_length()" is a faster way to compute byte-lengths
+ # conforming to:
+ #
+ # byte_length = (0 if i == 0 else
+ # 1 if -128 <= i < 128 else
+ # 2 if -65536 <= i < 65536 else
+ # ...)
+ byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]
+
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = any(b > 2 for b in byte_lengths)
+
if optimize:
- # Reorder columns such that all SHORT columns come before UINT8
- self.VarRegionIndex = _reorderItem(self.VarRegionIndex, narrows, zeroes)
+ # Reorder columns such that wider columns come before narrower columns
+ mapping = []
+ mapping.extend(i for i,b in enumerate(byte_lengths) if b > 2)
+ mapping.extend(i for i,b in enumerate(byte_lengths) if b == 2)
+ mapping.extend(i for i,b in enumerate(byte_lengths) if b == 1)
+
+ byte_lengths = _reorderItem(byte_lengths, mapping)
+ self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)
self.VarRegionCount = len(self.VarRegionIndex)
for i in range(len(items)):
- items[i] = _reorderItem(items[i], narrows, zeroes)
- self.NumShorts = count - len(narrows)
+ items[i] = _reorderItem(items[i], mapping)
+
+ if longWords:
+ self.NumShorts = max((i for i,b in enumerate(byte_lengths) if b > 2), default=-1) + 1
+ self.NumShorts |= 0x8000
else:
- wides = set(range(count)) - narrows
- self.NumShorts = 1+max(wides) if wides else 0
+ self.NumShorts = max((i for i,b in enumerate(byte_lengths) if b > 1), default=-1) + 1
+
self.VarRegionCount = len(self.VarRegionIndex)
return self
@@ -106,6 +121,14 @@ def buildVarIdxMap(varIdxes, glyphOrder):
self.mapping = {g:v for g,v in zip(glyphOrder, varIdxes)}
return self
+
+def buildDeltaSetIndexMap(varIdxes):
+ self = ot.DeltaSetIndexMap()
+ self.mapping = list(varIdxes)
+ self.Format = 1 if len(varIdxes) > 0xFFFF else 0
+ return self
+
+
def buildVarDevTable(varIdx):
self = ot.Device()
self.DeltaFormat = 0x8000
diff --git a/Lib/fontTools/varLib/cff.py b/Lib/fontTools/varLib/cff.py
index 4eed8b33..08ddfc41 100644
--- a/Lib/fontTools/varLib/cff.py
+++ b/Lib/fontTools/varLib/cff.py
@@ -163,15 +163,17 @@ def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):
"""
I step through the FontDicts in the FDArray of the varfont TopDict.
For each varfont FontDict:
- step through each key in FontDict.Private.
- For each key, step through each relevant source font Private dict, and
+
+ * step through each key in FontDict.Private.
+ * For each key, step through each relevant source font Private dict, and
build a list of values to blend.
+
The 'relevant' source fonts are selected by first getting the right
- submodel using vsindex_dict[vsindex]. The indices of the
- subModel.locations are mapped to source font list indices by
+ submodel using ``vsindex_dict[vsindex]``. The indices of the
+ ``subModel.locations`` are mapped to source font list indices by
assuming the latter order is the same as the order of the
- var_model.locations. I can then get the index of each subModel
- location in the list of var_model.locations.
+ ``var_model.locations``. I can then get the index of each subModel
+ location in the list of ``var_model.locations``.
"""
topDict = top_dicts[0]
@@ -591,19 +593,24 @@ class CFF2CharStringMergePen(T2CharStringPen):
def reorder_blend_args(self, commands, get_delta_func):
"""
We first re-order the master coordinate values.
- For a moveto to lineto, the args are now arranged as:
+ For a moveto to lineto, the args are now arranged as::
+
[ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]
- We re-arrange this to
- [ [master_0 x, master_1 x, master_2 x],
- [master_0 y, master_1 y, master_2 y]
- ]
+
+ We re-arrange this to::
+
+ [ [master_0 x, master_1 x, master_2 x],
+ [master_0 y, master_1 y, master_2 y]
+ ]
+
If the master values are all the same, we collapse the list to
as single value instead of a list.
- We then convert this to:
- [ [master_0 x] + [x delta tuple] + [numBlends=1]
- [master_0 y] + [y delta tuple] + [numBlends=1]
- ]
+ We then convert this to::
+
+ [ [master_0 x] + [x delta tuple] + [numBlends=1]
+ [master_0 y] + [y delta tuple] + [numBlends=1]
+ ]
"""
for cmd in commands:
# arg[i] is the set of arguments for this operator from master i.
diff --git a/Lib/fontTools/varLib/errors.py b/Lib/fontTools/varLib/errors.py
index 5840070f..c5a149cb 100644
--- a/Lib/fontTools/varLib/errors.py
+++ b/Lib/fontTools/varLib/errors.py
@@ -12,7 +12,7 @@ class VarLibValidationError(VarLibError):
class VarLibMergeError(VarLibError):
"""Raised when input data cannot be merged into a variable font."""
- def __init__(self, merger, **kwargs):
+ def __init__(self, merger=None, **kwargs):
self.merger = merger
if not kwargs:
kwargs = {}
@@ -28,17 +28,17 @@ class VarLibMergeError(VarLibError):
return self.__doc__
def _master_name(self, ix):
- ttf = self.merger.ttfs[ix]
- if (
- "name" in ttf
- and ttf["name"].getDebugName(1)
- and ttf["name"].getDebugName(2)
- ):
- return ttf["name"].getDebugName(1) + " " + ttf["name"].getDebugName(2)
- elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"):
- return ttf.reader.file.name
- else:
- return "master number %i" % ix
+ if self.merger is not None:
+ ttf = self.merger.ttfs[ix]
+ if (
+ "name" in ttf
+ and ttf["name"].getDebugName(1)
+ and ttf["name"].getDebugName(2)
+ ):
+ return ttf["name"].getDebugName(1) + " " + ttf["name"].getDebugName(2)
+ elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"):
+ return ttf.reader.file.name
+ return f"master number {ix}"
@property
def offender(self):
@@ -76,7 +76,7 @@ class ShouldBeConstant(VarLibMergeError):
@property
def details(self):
- if self.stack[0] != ".FeatureCount":
+ if self.stack[0] != ".FeatureCount" or self.merger is None:
return super().details
offender_index, offender = self.offender
bad_ttf = self.merger.ttfs[offender_index]
@@ -102,13 +102,12 @@ class FoundANone(VarLibMergeError):
@property
def offender(self):
- cause = self.argv[0]
- index = [x is None for x in cause["got"]].index(True)
+ index = [x is None for x in self.cause["got"]].index(True)
return index, self._master_name(index)
@property
def details(self):
- cause, stack = self.args[0], self.args[1:]
+ cause, stack = self.cause, self.stack
return f"{stack[0]}=={cause['got']}\n"
@@ -137,8 +136,7 @@ class UnsupportedFormat(VarLibMergeError):
@property
def reason(self):
- cause, stack = self.args[0], self.args[1:]
- return self.__doc__ % cause["subtable"]
+ return self.__doc__ % self.cause["subtable"]
class UnsupportedFormat(UnsupportedFormat):
diff --git a/Lib/fontTools/varLib/featureVars.py b/Lib/fontTools/varLib/featureVars.py
index 45f3d839..e3366327 100644
--- a/Lib/fontTools/varLib/featureVars.py
+++ b/Lib/fontTools/varLib/featureVars.py
@@ -44,8 +44,26 @@ def addFeatureVariations(font, conditionalSubstitutions, featureTag='rvrn'):
# >>> f.save(dstPath)
"""
- addFeatureVariationsRaw(font,
- overlayFeatureVariations(conditionalSubstitutions),
+
+ substitutions = overlayFeatureVariations(conditionalSubstitutions)
+
+ # turn substitution dicts into tuples of tuples, so they are hashable
+ conditionalSubstitutions, allSubstitutions = makeSubstitutionsHashable(substitutions)
+ if "GSUB" not in font:
+ font["GSUB"] = buildGSUB()
+
+ # setup lookups
+ lookupMap = buildSubstitutionLookups(font["GSUB"].table, allSubstitutions)
+
+ # addFeatureVariationsRaw takes a list of
+ # ( {condition}, [ lookup indices ] )
+ # so rearrange our lookups to match
+ conditionsAndLookups = []
+ for conditionSet, substitutions in conditionalSubstitutions:
+ conditionsAndLookups.append((conditionSet, [lookupMap[s] for s in substitutions]))
+
+ addFeatureVariationsRaw(font, font["GSUB"].table,
+ conditionsAndLookups,
featureTag)
def overlayFeatureVariations(conditionalSubstitutions):
@@ -76,20 +94,22 @@ def overlayFeatureVariations(conditionalSubstitutions):
substitution dictionaries. These dictionaries are not merged to allow data
sharing when they are converted into font tables.
- Example:
- >>> condSubst = [
- ... # A list of (Region, Substitution) tuples.
- ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
- ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
- ... ([{"wdth": (0.5, 1.0)}], {"cent": "cent.rvrn"}),
- ... ([{"wght": (0.5, 1.0), "wdth": (-1, 1.0)}], {"dollar": "dollar.rvrn"}),
- ... ]
- >>> from pprint import pprint
- >>> pprint(overlayFeatureVariations(condSubst))
- [({'wdth': (0.5, 1.0), 'wght': (0.5, 1.0)},
- [{'dollar': 'dollar.rvrn'}, {'cent': 'cent.rvrn'}]),
- ({'wdth': (0.5, 1.0)}, [{'cent': 'cent.rvrn'}]),
- ({'wght': (0.5, 1.0)}, [{'dollar': 'dollar.rvrn'}])]
+ Example::
+
+ >>> condSubst = [
+ ... # A list of (Region, Substitution) tuples.
+ ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
+ ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
+ ... ([{"wdth": (0.5, 1.0)}], {"cent": "cent.rvrn"}),
+ ... ([{"wght": (0.5, 1.0), "wdth": (-1, 1.0)}], {"dollar": "dollar.rvrn"}),
+ ... ]
+ >>> from pprint import pprint
+ >>> pprint(overlayFeatureVariations(condSubst))
+ [({'wdth': (0.5, 1.0), 'wght': (0.5, 1.0)},
+ [{'dollar': 'dollar.rvrn'}, {'cent': 'cent.rvrn'}]),
+ ({'wdth': (0.5, 1.0)}, [{'cent': 'cent.rvrn'}]),
+ ({'wght': (0.5, 1.0)}, [{'dollar': 'dollar.rvrn'}])]
+
"""
# Merge same-substitutions rules, as this creates fewer number oflookups.
@@ -166,11 +186,12 @@ def overlayFeatureVariations(conditionalSubstitutions):
#
def overlayBox(top, bot):
- """Overlays `top` box on top of `bot` box.
+ """Overlays ``top`` box on top of ``bot`` box.
Returns two items:
- - Box for intersection of `top` and `bot`, or None if they don't intersect.
- - Box for remainder of `bot`. Remainder box might not be exact (since the
+
+ * Box for intersection of ``top`` and ``bot``, or None if they don't intersect.
+ * Box for remainder of ``bot``. Remainder box might not be exact (since the
remainder might not be a simple box), but is inclusive of the exact
remainder.
"""
@@ -261,7 +282,7 @@ def cleanupBox(box):
# Low level implementation
#
-def addFeatureVariationsRaw(font, conditionalSubstitutions, featureTag='rvrn'):
+def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag='rvrn'):
"""Low level implementation of addFeatureVariations that directly
models the possibilities of the FeatureVariations table."""
@@ -273,31 +294,25 @@ def addFeatureVariationsRaw(font, conditionalSubstitutions, featureTag='rvrn'):
# make lookups
# add feature variations
#
+ if table.Version < 0x00010001:
+ table.Version = 0x00010001 # allow table.FeatureVariations
- if "GSUB" not in font:
- font["GSUB"] = buildGSUB()
-
- gsub = font["GSUB"].table
-
- if gsub.Version < 0x00010001:
- gsub.Version = 0x00010001 # allow gsub.FeatureVariations
-
- gsub.FeatureVariations = None # delete any existing FeatureVariations
+ table.FeatureVariations = None # delete any existing FeatureVariations
varFeatureIndices = []
- for index, feature in enumerate(gsub.FeatureList.FeatureRecord):
+ for index, feature in enumerate(table.FeatureList.FeatureRecord):
if feature.FeatureTag == featureTag:
varFeatureIndices.append(index)
if not varFeatureIndices:
varFeature = buildFeatureRecord(featureTag, [])
- gsub.FeatureList.FeatureRecord.append(varFeature)
- gsub.FeatureList.FeatureCount = len(gsub.FeatureList.FeatureRecord)
+ table.FeatureList.FeatureRecord.append(varFeature)
+ table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord)
- sortFeatureList(gsub)
- varFeatureIndex = gsub.FeatureList.FeatureRecord.index(varFeature)
+ sortFeatureList(table)
+ varFeatureIndex = table.FeatureList.FeatureRecord.index(varFeature)
- for scriptRecord in gsub.ScriptList.ScriptRecord:
+ for scriptRecord in table.ScriptList.ScriptRecord:
if scriptRecord.Script.DefaultLangSys is None:
raise VarLibError(
"Feature variations require that the script "
@@ -309,17 +324,10 @@ def addFeatureVariationsRaw(font, conditionalSubstitutions, featureTag='rvrn'):
varFeatureIndices = [varFeatureIndex]
- # setup lookups
-
- # turn substitution dicts into tuples of tuples, so they are hashable
- conditionalSubstitutions, allSubstitutions = makeSubstitutionsHashable(conditionalSubstitutions)
-
- lookupMap = buildSubstitutionLookups(gsub, allSubstitutions)
-
axisIndices = {axis.axisTag: axisIndex for axisIndex, axis in enumerate(font["fvar"].axes)}
featureVariationRecords = []
- for conditionSet, substitutions in conditionalSubstitutions:
+ for conditionSet, lookupIndices in conditionalSubstitutions:
conditionTable = []
for axisTag, (minValue, maxValue) in sorted(conditionSet.items()):
if minValue > maxValue:
@@ -328,15 +336,13 @@ def addFeatureVariationsRaw(font, conditionalSubstitutions, featureTag='rvrn'):
)
ct = buildConditionTable(axisIndices[axisTag], minValue, maxValue)
conditionTable.append(ct)
-
- lookupIndices = [lookupMap[subst] for subst in substitutions]
records = []
for varFeatureIndex in varFeatureIndices:
- existingLookupIndices = gsub.FeatureList.FeatureRecord[varFeatureIndex].Feature.LookupListIndex
+ existingLookupIndices = table.FeatureList.FeatureRecord[varFeatureIndex].Feature.LookupListIndex
records.append(buildFeatureTableSubstitutionRecord(varFeatureIndex, existingLookupIndices + lookupIndices))
featureVariationRecords.append(buildFeatureVariationRecord(conditionTable, records))
- gsub.FeatureVariations = buildFeatureVariations(featureVariationRecords)
+ table.FeatureVariations = buildFeatureVariations(featureVariationRecords)
#
@@ -413,6 +419,7 @@ def buildFeatureVariations(featureVariationRecords):
fv = ot.FeatureVariations()
fv.Version = 0x00010000
fv.FeatureVariationRecord = featureVariationRecords
+ fv.FeatureVariationCount = len(featureVariationRecords)
return fv
@@ -431,9 +438,11 @@ def buildFeatureVariationRecord(conditionTable, substitutionRecords):
fvr = ot.FeatureVariationRecord()
fvr.ConditionSet = ot.ConditionSet()
fvr.ConditionSet.ConditionTable = conditionTable
+ fvr.ConditionSet.ConditionCount = len(conditionTable)
fvr.FeatureTableSubstitution = ot.FeatureTableSubstitution()
fvr.FeatureTableSubstitution.Version = 0x00010000
fvr.FeatureTableSubstitution.SubstitutionRecord = substitutionRecords
+ fvr.FeatureTableSubstitution.SubstitutionCount = len(substitutionRecords)
return fvr
@@ -443,6 +452,7 @@ def buildFeatureTableSubstitutionRecord(featureIndex, lookupListIndices):
ftsr.FeatureIndex = featureIndex
ftsr.Feature = ot.Feature()
ftsr.Feature.LookupListIndex = lookupListIndices
+ ftsr.Feature.LookupCount = len(lookupListIndices)
return ftsr
diff --git a/Lib/fontTools/varLib/instancer/__init__.py b/Lib/fontTools/varLib/instancer/__init__.py
index 9bd30f19..cec802f3 100644
--- a/Lib/fontTools/varLib/instancer/__init__.py
+++ b/Lib/fontTools/varLib/instancer/__init__.py
@@ -5,9 +5,9 @@ create full instances (i.e. static fonts) from variable fonts, as well as "parti
variable fonts that only contain a subset of the original variation space.
For example, if you wish to pin the width axis to a given location while also
-restricting the weight axis to 400..700 range, you can do:
+restricting the weight axis to 400..700 range, you can do::
-$ fonttools varLib.instancer ./NotoSans-VF.ttf wdth=85 wght=400:700
+ $ fonttools varLib.instancer ./NotoSans-VF.ttf wdth=85 wght=400:700
See `fonttools varLib.instancer --help` for more info on the CLI options.
@@ -17,7 +17,7 @@ and returns a new TTFont representing either a partial VF, or full instance if a
the VF axes were given an explicit coordinate.
E.g. here's how to pin the wght axis at a given location in a wght+wdth variable
-font, keeping only the deltas associated with the wdth axis:
+font, keeping only the deltas associated with the wdth axis::
| >>> from fontTools import ttLib
| >>> from fontTools.varLib import instancer
@@ -53,12 +53,17 @@ whereas mutator implicitly drops the axis at its default coordinate.
The module currently supports only the first three "levels" of partial instancing,
with the rest planned to be implemented in the future, namely:
-L1) dropping one or more axes while leaving the default tables unmodified;
-L2) dropping one or more axes while pinning them at non-default locations;
-L3) restricting the range of variation of one or more axes, by setting either
+
+L1
+ dropping one or more axes while leaving the default tables unmodified;
+L2
+ dropping one or more axes while pinning them at non-default locations;
+L3
+ restricting the range of variation of one or more axes, by setting either
a new minimum or maximum, potentially -- though not necessarily -- dropping
entire regions of variations that fall completely outside this new range.
-L4) moving the default location of an axis.
+L4
+ moving the default location of an axis.
Currently only TrueType-flavored variable fonts (i.e. containing 'glyf' table)
are supported, but support for CFF2 variable fonts will be added soon.
@@ -127,6 +132,7 @@ class OverlapMode(IntEnum):
KEEP_AND_DONT_SET_FLAGS = 0
KEEP_AND_SET_FLAGS = 1
REMOVE = 2
+ REMOVE_AND_IGNORE_ERRORS = 3
def instantiateTupleVariationStore(
@@ -156,7 +162,7 @@ def instantiateTupleVariationStore(
axisLimits: Dict[str, Union[float, NormalizedAxisRange]]: axes' coordinates for
the full or partial instance, or ranges for restricting an axis' min/max.
origCoords: GlyphCoordinates: default instance's coordinates for computing 'gvar'
- inferred points (cf. table__g_l_y_f.getCoordinatesAndControls).
+ inferred points (cf. table__g_l_y_f._getCoordinatesAndControls).
endPts: List[int]: indices of contour end points, for inferring 'gvar' deltas.
Returns:
@@ -323,14 +329,11 @@ def limitTupleVariationAxisRange(var, axisTag, axisRange):
return [var, newVar]
-def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
- glyf = varfont["glyf"]
- coordinates, ctrl = glyf.getCoordinatesAndControls(glyphname, varfont)
+def _instantiateGvarGlyph(glyphname, glyf, gvar, hMetrics, vMetrics, axisLimits, optimize=True):
+ coordinates, ctrl = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
endPts = ctrl.endPts
- gvar = varfont["gvar"]
- # when exporting to TTX, a glyph with no variations is omitted; thus when loading
- # a TTFont from TTX, a glyph that's present in glyf table may be missing from gvar.
+ # Not every glyph may have variations
tupleVarStore = gvar.variations.get(glyphname)
if tupleVarStore:
@@ -341,7 +344,7 @@ def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
if defaultDeltas:
coordinates += _g_l_y_f.GlyphCoordinates(defaultDeltas)
- # setCoordinates also sets the hmtx/vmtx advance widths and sidebearings from
+ # _setCoordinates also sets the hmtx/vmtx advance widths and sidebearings from
# the four phantom points and glyph bounding boxes.
# We call it unconditionally even if a glyph has no variations or no deltas are
# applied at this location, in case the glyph's xMin and in turn its sidebearing
@@ -350,7 +353,7 @@ def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
# gvar table is empty; however, the composite's base glyph may have deltas
# applied, hence the composite's bbox and left/top sidebearings may need updating
# in the instanced font.
- glyf.setCoordinates(glyphname, coordinates, varfont)
+ glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
if not tupleVarStore:
if glyphname in gvar.variations:
@@ -362,12 +365,22 @@ def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
for var in tupleVarStore:
var.optimize(coordinates, endPts, isComposite)
+def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
+ """Remove?
+ https://github.com/fonttools/fonttools/pull/2266"""
+ gvar = varfont["gvar"]
+ glyf = varfont["glyf"]
+ hMetrics = varfont['hmtx'].metrics
+ vMetrics = getattr(varfont.get('vmtx'), 'metrics', None)
+ _instantiateGvarGlyph(glyphname, glyf, gvar, hMetrics, vMetrics, axisLimits, optimize=optimize)
def instantiateGvar(varfont, axisLimits, optimize=True):
log.info("Instantiating glyf/gvar tables")
gvar = varfont["gvar"]
glyf = varfont["glyf"]
+ hMetrics = varfont['hmtx'].metrics
+ vMetrics = getattr(varfont.get('vmtx'), 'metrics', None)
# Get list of glyph names sorted by component depth.
# If a composite glyph is processed before its base glyph, the bounds may
# be calculated incorrectly because deltas haven't been applied to the
@@ -382,7 +395,7 @@ def instantiateGvar(varfont, axisLimits, optimize=True):
),
)
for glyphname in glyphnames:
- instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=optimize)
+ _instantiateGvarGlyph(glyphname, glyf, gvar, hMetrics, vMetrics, axisLimits, optimize=optimize)
if not gvar.variations:
del varfont["gvar"]
@@ -1163,7 +1176,8 @@ def instantiateVariableFont(
If the value is `None`, the default coordinate as per 'fvar' table for
that axis is used.
The limit values can also be (min, max) tuples for restricting an
- axis's variation range, but this is not implemented yet.
+ axis's variation range. The default axis value must be included in
+ the new range.
inplace (bool): whether to modify input TTFont object in-place instead of
returning a distinct object.
optimize (bool): if False, do not perform IUP-delta optimization on the
@@ -1177,7 +1191,8 @@ def instantiateVariableFont(
on all glyphs to maximise cross-compatibility of the generated instance.
You can disable this by passing OverlapMode.KEEP_AND_DONT_SET_FLAGS.
If you want to remove the overlaps altogether and merge overlapping
- contours and components, you can pass OverlapMode.REMOVE. Note that this
+ contours and components, you can pass OverlapMode.REMOVE (or
+ REMOVE_AND_IGNORE_ERRORS to not hard-fail on tricky glyphs). Note that this
requires the skia-pathops package (available to pip install).
The overlap parameter only has effect when generating full static instances.
updateFontNames (bool): if True, update the instantiated font's name table using
@@ -1236,11 +1251,14 @@ def instantiateVariableFont(
if "glyf" in varfont:
if overlap == OverlapMode.KEEP_AND_SET_FLAGS:
setMacOverlapFlags(varfont["glyf"])
- elif overlap == OverlapMode.REMOVE:
+ elif overlap in (OverlapMode.REMOVE, OverlapMode.REMOVE_AND_IGNORE_ERRORS):
from fontTools.ttLib.removeOverlaps import removeOverlaps
log.info("Removing overlaps from glyf table")
- removeOverlaps(varfont)
+ removeOverlaps(
+ varfont,
+ ignoreErrors=(overlap == OverlapMode.REMOVE_AND_IGNORE_ERRORS),
+ )
varLib.set_default_weight_width_slant(
varfont,
@@ -1348,6 +1366,12 @@ def parseArgs(args):
"when generating a full instance). Requires skia-pathops",
)
parser.add_argument(
+ "--ignore-overlap-errors",
+ dest="ignore_overlap_errors",
+ action="store_true",
+ help="Don't crash if the remove-overlaps operation fails for some glyphs.",
+ )
+ parser.add_argument(
"--update-name-table",
action="store_true",
help="Update the instantiated font's `name` table. Input font must have "
@@ -1363,7 +1387,10 @@ def parseArgs(args):
options = parser.parse_args(args)
if options.remove_overlaps:
- options.overlap = OverlapMode.REMOVE
+ if options.ignore_overlap_errors:
+ options.overlap = OverlapMode.REMOVE_AND_IGNORE_ERRORS
+ else:
+ options.overlap = OverlapMode.REMOVE
else:
options.overlap = OverlapMode(int(options.overlap))
diff --git a/Lib/fontTools/varLib/merger.py b/Lib/fontTools/varLib/merger.py
index c9d14381..5a3a4f34 100644
--- a/Lib/fontTools/varLib/merger.py
+++ b/Lib/fontTools/varLib/merger.py
@@ -1,8 +1,10 @@
"""
Merge OpenType Layout tables (GDEF / GPOS / GSUB).
"""
+import os
import copy
from operator import ior
+import logging
from fontTools.misc import classifyTools
from fontTools.misc.roundTools import otRound
from fontTools.ttLib.tables import otTables as ot
@@ -13,6 +15,13 @@ from fontTools.varLib.models import nonNone, allNone, allEqual, allEqualTo
from fontTools.varLib.varStore import VarStoreInstancer
from functools import reduce
from fontTools.otlLib.builder import buildSinglePos
+from fontTools.otlLib.optimize.gpos import (
+ compact_pair_pos,
+ GPOS_COMPACT_MODE_DEFAULT,
+ GPOS_COMPACT_MODE_ENV_KEY,
+)
+
+log = logging.getLogger("fontTools.varLib.merger")
from .errors import (
ShouldBeConstant,
@@ -143,7 +152,7 @@ class AligningMerger(Merger):
def merge(merger, self, lst):
if self is None:
if not allNone(lst):
- raise NotANone(self, expected=None, got=lst)
+ raise NotANone(merger, expected=None, got=lst)
return
lst = [l.classDefs for l in lst]
@@ -156,7 +165,7 @@ def merge(merger, self, lst):
for k in allKeys:
allValues = nonNone(l.get(k) for l in lst)
if not allEqual(allValues):
- raise ShouldBeConstant(self, expected=allValues[0], got=lst, stack="."+k)
+ raise ShouldBeConstant(merger, expected=allValues[0], got=lst, stack=["." + k])
if not allValues:
self[k] = None
else:
@@ -193,7 +202,7 @@ def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
order = sorted(combined, key=sortKey)
# Make sure all input glyphsets were in proper order
if not all(sorted(vs, key=sortKey) == vs for vs in lst):
- raise InconsistentGlyphOrder(self)
+ raise InconsistentGlyphOrder()
del combined
paddedValues = None
@@ -208,7 +217,7 @@ def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
for dict_set in dict_sets]
return order, padded
-def _Lookup_SinglePos_get_effective_value(subtables, glyph):
+def _Lookup_SinglePos_get_effective_value(merger, subtables, glyph):
for self in subtables:
if self is None or \
type(self) != ot.SinglePos or \
@@ -220,10 +229,10 @@ def _Lookup_SinglePos_get_effective_value(subtables, glyph):
elif self.Format == 2:
return self.Value[self.Coverage.glyphs.index(glyph)]
else:
- raise UnsupportedFormat(self, subtable="single positioning lookup")
+ raise UnsupportedFormat(merger, subtable="single positioning lookup")
return None
-def _Lookup_PairPos_get_effective_value_pair(subtables, firstGlyph, secondGlyph):
+def _Lookup_PairPos_get_effective_value_pair(merger, subtables, firstGlyph, secondGlyph):
for self in subtables:
if self is None or \
type(self) != ot.PairPos or \
@@ -242,20 +251,21 @@ def _Lookup_PairPos_get_effective_value_pair(subtables, firstGlyph, secondGlyph)
klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0)
return self.Class1Record[klass1].Class2Record[klass2]
else:
- raise UnsupportedFormat(self, subtable="pair positioning lookup")
+ raise UnsupportedFormat(merger, subtable="pair positioning lookup")
return None
@AligningMerger.merger(ot.SinglePos)
def merge(merger, self, lst):
self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0)
if not (len(lst) == 1 or (valueFormat & ~0xF == 0)):
- raise UnsupportedFormat(self, subtable="single positioning lookup")
+ raise UnsupportedFormat(merger, subtable="single positioning lookup")
# If all have same coverage table and all are format 1,
coverageGlyphs = self.Coverage.glyphs
if all(v.Format == 1 for v in lst) and all(coverageGlyphs == v.Coverage.glyphs for v in lst):
- self.Value = otBase.ValueRecord(valueFormat)
- merger.mergeThings(self.Value, [v.Value for v in lst])
+ self.Value = otBase.ValueRecord(valueFormat, self.Value)
+ if valueFormat != 0:
+ merger.mergeThings(self.Value, [v.Value for v in lst])
self.ValueFormat = self.Value.getFormat()
return
@@ -279,7 +289,7 @@ def merge(merger, self, lst):
# Note!!! This *might* result in behavior change if ValueFormat2-zeroedness
# is different between used subtable and current subtable!
# TODO(behdad) Check and warn if that happens?
- v = _Lookup_SinglePos_get_effective_value(merger.lookup_subtables[i], glyph)
+ v = _Lookup_SinglePos_get_effective_value(merger, merger.lookup_subtables[i], glyph)
if v is None:
v = otBase.ValueRecord(valueFormat)
values[j] = v
@@ -288,8 +298,8 @@ def merge(merger, self, lst):
# Merge everything else; though, there shouldn't be anything else. :)
merger.mergeObjects(self, lst,
- exclude=('Format', 'Coverage', 'Value', 'ValueCount'))
- self.ValueFormat = reduce(int.__or__, [v.getFormat() for v in self.Value], 0)
+ exclude=('Format', 'Coverage', 'Value', 'ValueCount', 'ValueFormat'))
+ self.ValueFormat = reduce(int.__or__, [v.getEffectiveFormat() for v in self.Value], 0)
@AligningMerger.merger(ot.PairSet)
def merge(merger, self, lst):
@@ -315,7 +325,9 @@ def merge(merger, self, lst):
if values[j] is not None:
vpair = values[j]
else:
- vpair = _Lookup_PairPos_get_effective_value_pair(merger.lookup_subtables[i], self._firstGlyph, glyph)
+ vpair = _Lookup_PairPos_get_effective_value_pair(
+ merger, merger.lookup_subtables[i], self._firstGlyph, glyph
+ )
if vpair is None:
v1, v2 = None, None
else:
@@ -518,7 +530,7 @@ def merge(merger, self, lst):
elif self.Format == 2:
_PairPosFormat2_merge(self, lst, merger)
else:
- raise UnsupportedFormat(self, subtable="pair positioning lookup")
+ raise UnsupportedFormat(merger, subtable="pair positioning lookup")
del merger.valueFormat1, merger.valueFormat2
@@ -584,8 +596,7 @@ def _MarkBasePosFormat1_merge(self, lst, merger, Mark='Mark', Base='Base'):
# input masters.
if not allEqual(allClasses):
- raise allClasses(self, allClasses)
- rec = None
+ raise ShouldBeConstant(merger, expected=allClasses[0], got=allClasses)
else:
rec = ot.MarkRecord()
rec.Class = allClasses[0]
@@ -633,7 +644,8 @@ def _MarkBasePosFormat1_merge(self, lst, merger, Mark='Mark', Base='Base'):
@AligningMerger.merger(ot.MarkBasePos)
def merge(merger, self, lst):
if not allEqualTo(self.Format, (l.Format for l in lst)):
- raise InconsistentFormats(self,
+ raise InconsistentFormats(
+ merger,
subtable="mark-to-base positioning lookup",
expected=self.Format,
got=[l.Format for l in lst]
@@ -641,12 +653,13 @@ def merge(merger, self, lst):
if self.Format == 1:
_MarkBasePosFormat1_merge(self, lst, merger)
else:
- raise UnsupportedFormat(self, subtable="mark-to-base positioning lookup")
+ raise UnsupportedFormat(merger, subtable="mark-to-base positioning lookup")
@AligningMerger.merger(ot.MarkMarkPos)
def merge(merger, self, lst):
if not allEqualTo(self.Format, (l.Format for l in lst)):
- raise InconsistentFormats(self,
+ raise InconsistentFormats(
+ merger,
subtable="mark-to-mark positioning lookup",
expected=self.Format,
got=[l.Format for l in lst]
@@ -654,7 +667,7 @@ def merge(merger, self, lst):
if self.Format == 1:
_MarkBasePosFormat1_merge(self, lst, merger, 'Mark1', 'Mark2')
else:
- raise UnsupportedFormat(self, subtable="mark-to-mark positioning lookup")
+ raise UnsupportedFormat(merger, subtable="mark-to-mark positioning lookup")
def _PairSet_flatten(lst, font):
self = ot.PairSet()
@@ -780,12 +793,13 @@ def merge(merger, self, lst):
continue
if sts[0].__class__.__name__.startswith('Extension'):
if not allEqual([st.__class__ for st in sts]):
- raise InconsistentExtensions(self,
+ raise InconsistentExtensions(
+ merger,
expected="Extension",
got=[st.__class__.__name__ for st in sts]
)
if not allEqual([st.ExtensionLookupType for st in sts]):
- raise InconsistentExtensions(self)
+ raise InconsistentExtensions(merger)
l.LookupType = sts[0].ExtensionLookupType
new_sts = [st.ExtSubTable for st in sts]
del sts[:]
@@ -833,6 +847,15 @@ def merge(merger, self, lst):
self.SubTable.pop(-1)
self.SubTableCount -= 1
+ # Compact the merged subtables
+ # This is a good moment to do it because the compaction should create
+ # smaller subtables, which may prevent overflows from happening.
+ mode = os.environ.get(GPOS_COMPACT_MODE_ENV_KEY, GPOS_COMPACT_MODE_DEFAULT)
+ if mode and mode != "0":
+ log.info("Compacting GPOS...")
+ self.SubTable = compact_pair_pos(merger.font, mode, self.SubTable)
+ self.SubTableCount = len(self.SubTable)
+
elif isSinglePos and flattened:
singlePosTable = self.SubTable[0]
glyphs = singlePosTable.Coverage.glyphs
@@ -847,7 +870,6 @@ def merge(merger, self, lst):
del merger.lookup_subtables
-
#
# InstancerMerger
#
@@ -989,7 +1011,7 @@ def merge(merger, self, lst):
varidx = (dev.StartSize << 16) + dev.EndSize
delta = otRound(instancer[varidx])
- setattr(self, name, getattr(self, name) + delta)
+ setattr(self, name, getattr(self, name, 0) + delta)
#
@@ -1035,7 +1057,7 @@ def buildVarDevTable(store_builder, master_values):
@VariationMerger.merger(ot.BaseCoord)
def merge(merger, self, lst):
if self.Format != 1:
- raise UnsupportedFormat(self, subtable="a baseline coordinate")
+ raise UnsupportedFormat(merger, subtable="a baseline coordinate")
self.Coordinate, DeviceTable = buildVarDevTable(merger.store_builder, [a.Coordinate for a in lst])
if DeviceTable:
self.Format = 3
@@ -1044,7 +1066,7 @@ def merge(merger, self, lst):
@VariationMerger.merger(ot.CaretValue)
def merge(merger, self, lst):
if self.Format != 1:
- raise UnsupportedFormat(self, subtable="a caret")
+ raise UnsupportedFormat(merger, subtable="a caret")
self.Coordinate, DeviceTable = buildVarDevTable(merger.store_builder, [a.Coordinate for a in lst])
if DeviceTable:
self.Format = 3
@@ -1053,7 +1075,7 @@ def merge(merger, self, lst):
@VariationMerger.merger(ot.Anchor)
def merge(merger, self, lst):
if self.Format != 1:
- raise UnsupportedFormat(self, subtable="an anchor")
+ raise UnsupportedFormat(merger, subtable="an anchor")
self.XCoordinate, XDeviceTable = buildVarDevTable(merger.store_builder, [a.XCoordinate for a in lst])
self.YCoordinate, YDeviceTable = buildVarDevTable(merger.store_builder, [a.YCoordinate for a in lst])
if XDeviceTable or YDeviceTable:
diff --git a/Lib/fontTools/varLib/models.py b/Lib/fontTools/varLib/models.py
index 9296deda..c548fbca 100644
--- a/Lib/fontTools/varLib/models.py
+++ b/Lib/fontTools/varLib/models.py
@@ -1,484 +1,530 @@
"""Variation fonts interpolation models."""
-__all__ = ['nonNone', 'allNone', 'allEqual', 'allEqualTo', 'subList',
- 'normalizeValue', 'normalizeLocation',
- 'supportScalar',
- 'VariationModel']
+__all__ = [
+ "nonNone",
+ "allNone",
+ "allEqual",
+ "allEqualTo",
+ "subList",
+ "normalizeValue",
+ "normalizeLocation",
+ "supportScalar",
+ "VariationModel",
+]
from fontTools.misc.roundTools import noRound
from .errors import VariationModelError
def nonNone(lst):
- return [l for l in lst if l is not None]
+ return [l for l in lst if l is not None]
+
def allNone(lst):
- return all(l is None for l in lst)
+ return all(l is None for l in lst)
+
def allEqualTo(ref, lst, mapper=None):
- if mapper is None:
- return all(ref == item for item in lst)
- else:
- mapped = mapper(ref)
- return all(mapped == mapper(item) for item in lst)
+ if mapper is None:
+ return all(ref == item for item in lst)
+
+ mapped = mapper(ref)
+ return all(mapped == mapper(item) for item in lst)
+
def allEqual(lst, mapper=None):
- if not lst:
- return True
- it = iter(lst)
- try:
- first = next(it)
- except StopIteration:
- return True
- return allEqualTo(first, it, mapper=mapper)
+ if not lst:
+ return True
+ it = iter(lst)
+ try:
+ first = next(it)
+ except StopIteration:
+ return True
+ return allEqualTo(first, it, mapper=mapper)
+
def subList(truth, lst):
- assert len(truth) == len(lst)
- return [l for l,t in zip(lst,truth) if t]
+ assert len(truth) == len(lst)
+ return [l for l, t in zip(lst, truth) if t]
+
def normalizeValue(v, triple):
- """Normalizes value based on a min/default/max triple.
- >>> normalizeValue(400, (100, 400, 900))
- 0.0
- >>> normalizeValue(100, (100, 400, 900))
- -1.0
- >>> normalizeValue(650, (100, 400, 900))
- 0.5
- """
- lower, default, upper = triple
- if not (lower <= default <= upper):
- raise ValueError(
- f"Invalid axis values, must be minimum, default, maximum: "
- f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
- )
- v = max(min(v, upper), lower)
- if v == default:
- v = 0.
- elif v < default:
- v = (v - default) / (default - lower)
- else:
- v = (v - default) / (upper - default)
- return v
+ """Normalizes value based on a min/default/max triple.
+ >>> normalizeValue(400, (100, 400, 900))
+ 0.0
+ >>> normalizeValue(100, (100, 400, 900))
+ -1.0
+ >>> normalizeValue(650, (100, 400, 900))
+ 0.5
+ """
+ lower, default, upper = triple
+ if not (lower <= default <= upper):
+ raise ValueError(
+ f"Invalid axis values, must be minimum, default, maximum: "
+ f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
+ )
+ v = max(min(v, upper), lower)
+ if v == default:
+ v = 0.0
+ elif v < default:
+ v = (v - default) / (default - lower)
+ else:
+ v = (v - default) / (upper - default)
+ return v
+
def normalizeLocation(location, axes):
- """Normalizes location based on axis min/default/max values from axes.
- >>> axes = {"wght": (100, 400, 900)}
- >>> normalizeLocation({"wght": 400}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 100}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": 900}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 650}, axes)
- {'wght': 0.5}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': -1.0}
- >>> axes = {"wght": (0, 0, 1000)}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": -1}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 500}, axes)
- {'wght': 0.5}
- >>> normalizeLocation({"wght": 1001}, axes)
- {'wght': 1.0}
- >>> axes = {"wght": (0, 1000, 1000)}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": -1}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": 500}, axes)
- {'wght': -0.5}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 1001}, axes)
- {'wght': 0.0}
- """
- out = {}
- for tag,triple in axes.items():
- v = location.get(tag, triple[1])
- out[tag] = normalizeValue(v, triple)
- return out
+ """Normalizes location based on axis min/default/max values from axes.
+ >>> axes = {"wght": (100, 400, 900)}
+ >>> normalizeLocation({"wght": 400}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": 100}, axes)
+ {'wght': -1.0}
+ >>> normalizeLocation({"wght": 900}, axes)
+ {'wght': 1.0}
+ >>> normalizeLocation({"wght": 650}, axes)
+ {'wght': 0.5}
+ >>> normalizeLocation({"wght": 1000}, axes)
+ {'wght': 1.0}
+ >>> normalizeLocation({"wght": 0}, axes)
+ {'wght': -1.0}
+ >>> axes = {"wght": (0, 0, 1000)}
+ >>> normalizeLocation({"wght": 0}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": -1}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": 1000}, axes)
+ {'wght': 1.0}
+ >>> normalizeLocation({"wght": 500}, axes)
+ {'wght': 0.5}
+ >>> normalizeLocation({"wght": 1001}, axes)
+ {'wght': 1.0}
+ >>> axes = {"wght": (0, 1000, 1000)}
+ >>> normalizeLocation({"wght": 0}, axes)
+ {'wght': -1.0}
+ >>> normalizeLocation({"wght": -1}, axes)
+ {'wght': -1.0}
+ >>> normalizeLocation({"wght": 500}, axes)
+ {'wght': -0.5}
+ >>> normalizeLocation({"wght": 1000}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": 1001}, axes)
+ {'wght': 0.0}
+ """
+ out = {}
+ for tag, triple in axes.items():
+ v = location.get(tag, triple[1])
+ out[tag] = normalizeValue(v, triple)
+ return out
+
def supportScalar(location, support, ot=True):
- """Returns the scalar multiplier at location, for a master
- with support. If ot is True, then a peak value of zero
- for support of an axis means "axis does not participate". That
- is how OpenType Variation Font technology works.
- >>> supportScalar({}, {})
- 1.0
- >>> supportScalar({'wght':.2}, {})
- 1.0
- >>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
- 0.1
- >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
- 0.75
- >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
- 0.75
- >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
- 0.375
- >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
- 0.75
- >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
- 0.75
- """
- scalar = 1.
- for axis,(lower,peak,upper) in support.items():
- if ot:
- # OpenType-specific case handling
- if peak == 0.:
- continue
- if lower > peak or peak > upper:
- continue
- if lower < 0. and upper > 0.:
- continue
- v = location.get(axis, 0.)
- else:
- assert axis in location
- v = location[axis]
- if v == peak:
- continue
- if v <= lower or upper <= v:
- scalar = 0.
- break
- if v < peak:
- scalar *= (v - lower) / (peak - lower)
- else: # v > peak
- scalar *= (v - upper) / (peak - upper)
- return scalar
+ """Returns the scalar multiplier at location, for a master
+ with support. If ot is True, then a peak value of zero
+ for support of an axis means "axis does not participate". That
+ is how OpenType Variation Font technology works.
+ >>> supportScalar({}, {})
+ 1.0
+ >>> supportScalar({'wght':.2}, {})
+ 1.0
+ >>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
+ 0.1
+ >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
+ 0.75
+ >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
+ 0.75
+ >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
+ 0.375
+ >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
+ 0.75
+ >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
+ 0.75
+ """
+ scalar = 1.0
+ for axis, (lower, peak, upper) in support.items():
+ if ot:
+ # OpenType-specific case handling
+ if peak == 0.0:
+ continue
+ if lower > peak or peak > upper:
+ continue
+ if lower < 0.0 and upper > 0.0:
+ continue
+ v = location.get(axis, 0.0)
+ else:
+ assert axis in location
+ v = location[axis]
+ if v == peak:
+ continue
+ if v <= lower or upper <= v:
+ scalar = 0.0
+ break
+ if v < peak:
+ scalar *= (v - lower) / (peak - lower)
+ else: # v > peak
+ scalar *= (v - upper) / (peak - upper)
+ return scalar
class VariationModel(object):
- """
- Locations must be in normalized space. Ie. base master
- is at origin (0).
- >>> from pprint import pprint
- >>> locations = [ \
- {'wght':100}, \
- {'wght':-100}, \
- {'wght':-180}, \
- {'wdth':+.3}, \
- {'wght':+120,'wdth':.3}, \
- {'wght':+120,'wdth':.2}, \
- {}, \
- {'wght':+180,'wdth':.3}, \
- {'wght':+180}, \
- ]
- >>> model = VariationModel(locations, axisOrder=['wght'])
- >>> pprint(model.locations)
- [{},
- {'wght': -100},
- {'wght': -180},
- {'wght': 100},
- {'wght': 180},
- {'wdth': 0.3},
- {'wdth': 0.3, 'wght': 180},
- {'wdth': 0.3, 'wght': 120},
- {'wdth': 0.2, 'wght': 120}]
- >>> pprint(model.deltaWeights)
- [{},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0, 4: 1.0, 5: 1.0},
- {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666},
- {0: 1.0,
- 3: 0.75,
- 4: 0.25,
- 5: 0.6666666666666667,
- 6: 0.4444444444444445,
- 7: 0.6666666666666667}]
+ """
+ Locations must be in normalized space. I.e. base master
+ is at origin (0)::
+
+ >>> from pprint import pprint
+ >>> locations = [ \
+ {'wght':100}, \
+ {'wght':-100}, \
+ {'wght':-180}, \
+ {'wdth':+.3}, \
+ {'wght':+120,'wdth':.3}, \
+ {'wght':+120,'wdth':.2}, \
+ {}, \
+ {'wght':+180,'wdth':.3}, \
+ {'wght':+180}, \
+ ]
+ >>> model = VariationModel(locations, axisOrder=['wght'])
+ >>> pprint(model.locations)
+ [{},
+ {'wght': -100},
+ {'wght': -180},
+ {'wght': 100},
+ {'wght': 180},
+ {'wdth': 0.3},
+ {'wdth': 0.3, 'wght': 180},
+ {'wdth': 0.3, 'wght': 120},
+ {'wdth': 0.2, 'wght': 120}]
+ >>> pprint(model.deltaWeights)
+ [{},
+ {0: 1.0},
+ {0: 1.0},
+ {0: 1.0},
+ {0: 1.0},
+ {0: 1.0},
+ {0: 1.0, 4: 1.0, 5: 1.0},
+ {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666},
+ {0: 1.0,
+ 3: 0.75,
+ 4: 0.25,
+ 5: 0.6666666666666667,
+ 6: 0.4444444444444445,
+ 7: 0.6666666666666667}]
"""
- def __init__(self, locations, axisOrder=None):
- if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):
- raise VariationModelError("Locations must be unique.")
-
- self.origLocations = locations
- self.axisOrder = axisOrder if axisOrder is not None else []
-
- locations = [{k:v for k,v in loc.items() if v != 0.} for loc in locations]
- keyFunc = self.getMasterLocationsSortKeyFunc(locations, axisOrder=self.axisOrder)
- self.locations = sorted(locations, key=keyFunc)
-
- # Mapping from user's master order to our master order
- self.mapping = [self.locations.index(l) for l in locations]
- self.reverseMapping = [locations.index(l) for l in self.locations]
-
- self._computeMasterSupports(keyFunc.axisPoints)
- self._subModels = {}
-
- def getSubModel(self, items):
- if None not in items:
- return self, items
- key = tuple(v is not None for v in items)
- subModel = self._subModels.get(key)
- if subModel is None:
- subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)
- self._subModels[key] = subModel
- return subModel, subList(key, items)
-
- @staticmethod
- def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):
- if {} not in locations:
- raise VariationModelError("Base master not found.")
- axisPoints = {}
- for loc in locations:
- if len(loc) != 1:
- continue
- axis = next(iter(loc))
- value = loc[axis]
- if axis not in axisPoints:
- axisPoints[axis] = {0.}
- assert value not in axisPoints[axis], (
- 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints)
- )
- axisPoints[axis].add(value)
-
- def getKey(axisPoints, axisOrder):
- def sign(v):
- return -1 if v < 0 else +1 if v > 0 else 0
- def key(loc):
- rank = len(loc)
- onPointAxes = [
- axis for axis, value in loc.items()
- if axis in axisPoints
- and value in axisPoints[axis]
- ]
- orderedAxes = [axis for axis in axisOrder if axis in loc]
- orderedAxes.extend([axis for axis in sorted(loc.keys()) if axis not in axisOrder])
- return (
- rank, # First, order by increasing rank
- -len(onPointAxes), # Next, by decreasing number of onPoint axes
- tuple(axisOrder.index(axis) if axis in axisOrder else 0x10000 for axis in orderedAxes), # Next, by known axes
- tuple(orderedAxes), # Next, by all axes
- tuple(sign(loc[axis]) for axis in orderedAxes), # Next, by signs of axis values
- tuple(abs(loc[axis]) for axis in orderedAxes), # Next, by absolute value of axis values
- )
- return key
-
- ret = getKey(axisPoints, axisOrder)
- ret.axisPoints = axisPoints
- return ret
-
- def reorderMasters(self, master_list, mapping):
- # For changing the master data order without
- # recomputing supports and deltaWeights.
- new_list = [master_list[idx] for idx in mapping]
- self.origLocations = [self.origLocations[idx] for idx in mapping]
- locations = [{k:v for k,v in loc.items() if v != 0.}
- for loc in self.origLocations]
- self.mapping = [self.locations.index(l) for l in locations]
- self.reverseMapping = [locations.index(l) for l in self.locations]
- self._subModels = {}
- return new_list
-
- def _computeMasterSupports(self, axisPoints):
- supports = []
- regions = self._locationsToRegions()
- for i,region in enumerate(regions):
- locAxes = set(region.keys())
- # Walk over previous masters now
- for j,prev_region in enumerate(regions[:i]):
- # Master with extra axes do not participte
- if not set(prev_region.keys()).issubset(locAxes):
- continue
- # If it's NOT in the current box, it does not participate
- relevant = True
- for axis, (lower,peak,upper) in region.items():
- if axis not in prev_region or not (prev_region[axis][1] == peak or lower < prev_region[axis][1] < upper):
- relevant = False
- break
- if not relevant:
- continue
-
- # Split the box for new master; split in whatever direction
- # that has largest range ratio.
- #
- # For symmetry, we actually cut across multiple axes
- # if they have the largest, equal, ratio.
- # https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804
-
- bestAxes = {}
- bestRatio = -1
- for axis in prev_region.keys():
- val = prev_region[axis][1]
- assert axis in region
- lower,locV,upper = region[axis]
- newLower, newUpper = lower, upper
- if val < locV:
- newLower = val
- ratio = (val - locV) / (lower - locV)
- elif locV < val:
- newUpper = val
- ratio = (val - locV) / (upper - locV)
- else: # val == locV
- # Can't split box in this direction.
- continue
- if ratio > bestRatio:
- bestAxes = {}
- bestRatio = ratio
- if ratio == bestRatio:
- bestAxes[axis] = (newLower, locV, newUpper)
-
- for axis,triple in bestAxes.items ():
- region[axis] = triple
- supports.append(region)
- self.supports = supports
- self._computeDeltaWeights()
-
- def _locationsToRegions(self):
- locations = self.locations
- # Compute min/max across each axis, use it as total range.
- # TODO Take this as input from outside?
- minV = {}
- maxV = {}
- for l in locations:
- for k,v in l.items():
- minV[k] = min(v, minV.get(k, v))
- maxV[k] = max(v, maxV.get(k, v))
-
- regions = []
- for i,loc in enumerate(locations):
- region = {}
- for axis,locV in loc.items():
- if locV > 0:
- region[axis] = (0, locV, maxV[axis])
- else:
- region[axis] = (minV[axis], locV, 0)
- regions.append(region)
- return regions
-
- def _computeDeltaWeights(self):
- deltaWeights = []
- for i,loc in enumerate(self.locations):
- deltaWeight = {}
- # Walk over previous masters now, populate deltaWeight
- for j,m in enumerate(self.locations[:i]):
- scalar = supportScalar(loc, self.supports[j])
- if scalar:
- deltaWeight[j] = scalar
- deltaWeights.append(deltaWeight)
- self.deltaWeights = deltaWeights
-
- def getDeltas(self, masterValues, *, round=noRound):
- assert len(masterValues) == len(self.deltaWeights)
- mapping = self.reverseMapping
- out = []
- for i,weights in enumerate(self.deltaWeights):
- delta = masterValues[mapping[i]]
- for j,weight in weights.items():
- delta -= out[j] * weight
- out.append(round(delta))
- return out
-
- def getDeltasAndSupports(self, items, *, round=noRound):
- model, items = self.getSubModel(items)
- return model.getDeltas(items, round=round), model.supports
-
- def getScalars(self, loc):
- return [supportScalar(loc, support) for support in self.supports]
-
- @staticmethod
- def interpolateFromDeltasAndScalars(deltas, scalars):
- v = None
- assert len(deltas) == len(scalars)
- for delta, scalar in zip(deltas, scalars):
- if not scalar: continue
- contribution = delta * scalar
- if v is None:
- v = contribution
- else:
- v += contribution
- return v
-
- def interpolateFromDeltas(self, loc, deltas):
- scalars = self.getScalars(loc)
- return self.interpolateFromDeltasAndScalars(deltas, scalars)
-
- def interpolateFromMasters(self, loc, masterValues, *, round=noRound):
- deltas = self.getDeltas(masterValues, round=round)
- return self.interpolateFromDeltas(loc, deltas)
-
- def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):
- deltas = self.getDeltas(masterValues, round=round)
- return self.interpolateFromDeltasAndScalars(deltas, scalars)
+ def __init__(self, locations, axisOrder=None):
+ if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):
+ raise VariationModelError("Locations must be unique.")
+
+ self.origLocations = locations
+ self.axisOrder = axisOrder if axisOrder is not None else []
+
+ locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations]
+ keyFunc = self.getMasterLocationsSortKeyFunc(
+ locations, axisOrder=self.axisOrder
+ )
+ self.locations = sorted(locations, key=keyFunc)
+
+ # Mapping from user's master order to our master order
+ self.mapping = [self.locations.index(l) for l in locations]
+ self.reverseMapping = [locations.index(l) for l in self.locations]
+
+ self._computeMasterSupports()
+ self._subModels = {}
+
+ def getSubModel(self, items):
+ if None not in items:
+ return self, items
+ key = tuple(v is not None for v in items)
+ subModel = self._subModels.get(key)
+ if subModel is None:
+ subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)
+ self._subModels[key] = subModel
+ return subModel, subList(key, items)
+
+ @staticmethod
+ def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):
+ if {} not in locations:
+ raise VariationModelError("Base master not found.")
+ axisPoints = {}
+ for loc in locations:
+ if len(loc) != 1:
+ continue
+ axis = next(iter(loc))
+ value = loc[axis]
+ if axis not in axisPoints:
+ axisPoints[axis] = {0.0}
+ assert (
+ value not in axisPoints[axis]
+ ), 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints)
+ axisPoints[axis].add(value)
+
+ def getKey(axisPoints, axisOrder):
+ def sign(v):
+ return -1 if v < 0 else +1 if v > 0 else 0
+
+ def key(loc):
+ rank = len(loc)
+ onPointAxes = [
+ axis
+ for axis, value in loc.items()
+ if axis in axisPoints and value in axisPoints[axis]
+ ]
+ orderedAxes = [axis for axis in axisOrder if axis in loc]
+ orderedAxes.extend(
+ [axis for axis in sorted(loc.keys()) if axis not in axisOrder]
+ )
+ return (
+ rank, # First, order by increasing rank
+ -len(onPointAxes), # Next, by decreasing number of onPoint axes
+ tuple(
+ axisOrder.index(axis) if axis in axisOrder else 0x10000
+ for axis in orderedAxes
+ ), # Next, by known axes
+ tuple(orderedAxes), # Next, by all axes
+ tuple(
+ sign(loc[axis]) for axis in orderedAxes
+ ), # Next, by signs of axis values
+ tuple(
+ abs(loc[axis]) for axis in orderedAxes
+ ), # Next, by absolute value of axis values
+ )
+
+ return key
+
+ ret = getKey(axisPoints, axisOrder)
+ return ret
+
+ def reorderMasters(self, master_list, mapping):
+ # For changing the master data order without
+ # recomputing supports and deltaWeights.
+ new_list = [master_list[idx] for idx in mapping]
+ self.origLocations = [self.origLocations[idx] for idx in mapping]
+ locations = [
+ {k: v for k, v in loc.items() if v != 0.0} for loc in self.origLocations
+ ]
+ self.mapping = [self.locations.index(l) for l in locations]
+ self.reverseMapping = [locations.index(l) for l in self.locations]
+ self._subModels = {}
+ return new_list
+
+ def _computeMasterSupports(self):
+ self.supports = []
+ regions = self._locationsToRegions()
+ for i, region in enumerate(regions):
+ locAxes = set(region.keys())
+ # Walk over previous masters now
+ for prev_region in regions[:i]:
+ # Masters with extra axes do not participate
+ if not set(prev_region.keys()).issubset(locAxes):
+ continue
+ # If it's NOT in the current box, it does not participate
+ relevant = True
+ for axis, (lower, peak, upper) in region.items():
+ if axis not in prev_region or not (
+ prev_region[axis][1] == peak
+ or lower < prev_region[axis][1] < upper
+ ):
+ relevant = False
+ break
+ if not relevant:
+ continue
+
+ # Split the box for new master; split in whatever direction
+ # that has largest range ratio.
+ #
+ # For symmetry, we actually cut across multiple axes
+ # if they have the largest, equal, ratio.
+ # https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804
+
+ bestAxes = {}
+ bestRatio = -1
+ for axis in prev_region.keys():
+ val = prev_region[axis][1]
+ assert axis in region
+ lower, locV, upper = region[axis]
+ newLower, newUpper = lower, upper
+ if val < locV:
+ newLower = val
+ ratio = (val - locV) / (lower - locV)
+ elif locV < val:
+ newUpper = val
+ ratio = (val - locV) / (upper - locV)
+ else: # val == locV
+ # Can't split box in this direction.
+ continue
+ if ratio > bestRatio:
+ bestAxes = {}
+ bestRatio = ratio
+ if ratio == bestRatio:
+ bestAxes[axis] = (newLower, locV, newUpper)
+
+ for axis, triple in bestAxes.items():
+ region[axis] = triple
+ self.supports.append(region)
+ self._computeDeltaWeights()
+
+ def _locationsToRegions(self):
+ locations = self.locations
+ # Compute min/max across each axis, use it as total range.
+ # TODO Take this as input from outside?
+ minV = {}
+ maxV = {}
+ for l in locations:
+ for k, v in l.items():
+ minV[k] = min(v, minV.get(k, v))
+ maxV[k] = max(v, maxV.get(k, v))
+
+ regions = []
+ for loc in locations:
+ region = {}
+ for axis, locV in loc.items():
+ if locV > 0:
+ region[axis] = (0, locV, maxV[axis])
+ else:
+ region[axis] = (minV[axis], locV, 0)
+ regions.append(region)
+ return regions
+
+ def _computeDeltaWeights(self):
+ self.deltaWeights = []
+ for i, loc in enumerate(self.locations):
+ deltaWeight = {}
+ # Walk over previous masters now, populate deltaWeight
+ for j, support in enumerate(self.supports[:i]):
+ scalar = supportScalar(loc, support)
+ if scalar:
+ deltaWeight[j] = scalar
+ self.deltaWeights.append(deltaWeight)
+
+ def getDeltas(self, masterValues, *, round=noRound):
+ assert len(masterValues) == len(self.deltaWeights)
+ mapping = self.reverseMapping
+ out = []
+ for i, weights in enumerate(self.deltaWeights):
+ delta = masterValues[mapping[i]]
+ for j, weight in weights.items():
+ if weight == 1:
+ delta -= out[j]
+ else:
+ delta -= out[j] * weight
+ out.append(round(delta))
+ return out
+
+ def getDeltasAndSupports(self, items, *, round=noRound):
+ model, items = self.getSubModel(items)
+ return model.getDeltas(items, round=round), model.supports
+
+ def getScalars(self, loc):
+ return [supportScalar(loc, support) for support in self.supports]
+
+ @staticmethod
+ def interpolateFromDeltasAndScalars(deltas, scalars):
+ v = None
+ assert len(deltas) == len(scalars)
+ for delta, scalar in zip(deltas, scalars):
+ if not scalar:
+ continue
+ contribution = delta * scalar
+ if v is None:
+ v = contribution
+ else:
+ v += contribution
+ return v
+
+ def interpolateFromDeltas(self, loc, deltas):
+ scalars = self.getScalars(loc)
+ return self.interpolateFromDeltasAndScalars(deltas, scalars)
+
+ def interpolateFromMasters(self, loc, masterValues, *, round=noRound):
+ deltas = self.getDeltas(masterValues, round=round)
+ return self.interpolateFromDeltas(loc, deltas)
+
+ def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):
+ deltas = self.getDeltas(masterValues, round=round)
+ return self.interpolateFromDeltasAndScalars(deltas, scalars)
def piecewiseLinearMap(v, mapping):
- keys = mapping.keys()
- if not keys:
- return v
- if v in keys:
- return mapping[v]
- k = min(keys)
- if v < k:
- return v + mapping[k] - k
- k = max(keys)
- if v > k:
- return v + mapping[k] - k
- # Interpolate
- a = max(k for k in keys if k < v)
- b = min(k for k in keys if k > v)
- va = mapping[a]
- vb = mapping[b]
- return va + (vb - va) * (v - a) / (b - a)
+ keys = mapping.keys()
+ if not keys:
+ return v
+ if v in keys:
+ return mapping[v]
+ k = min(keys)
+ if v < k:
+ return v + mapping[k] - k
+ k = max(keys)
+ if v > k:
+ return v + mapping[k] - k
+ # Interpolate
+ a = max(k for k in keys if k < v)
+ b = min(k for k in keys if k > v)
+ va = mapping[a]
+ vb = mapping[b]
+ return va + (vb - va) * (v - a) / (b - a)
def main(args=None):
- """Normalize locations on a given designspace"""
- from fontTools import configLogger
- import argparse
-
- parser = argparse.ArgumentParser(
- "fonttools varLib.models",
- description=main.__doc__,
- )
- parser.add_argument('--loglevel', metavar='LEVEL', default="INFO",
- help="Logging level (defaults to INFO)")
-
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('-d', '--designspace',metavar="DESIGNSPACE",type=str)
- group.add_argument('-l', '--locations', metavar='LOCATION', nargs='+',
- help="Master locations as comma-separate coordinates. One must be all zeros.")
-
- args = parser.parse_args(args)
-
- configLogger(level=args.loglevel)
- from pprint import pprint
-
- if args.designspace:
- from fontTools.designspaceLib import DesignSpaceDocument
- doc = DesignSpaceDocument()
- doc.read(args.designspace)
- locs = [s.location for s in doc.sources]
- print("Original locations:")
- pprint(locs)
- doc.normalize()
- print("Normalized locations:")
- locs = [s.location for s in doc.sources]
- pprint(locs)
- else:
- axes = [chr(c) for c in range(ord('A'), ord('Z')+1)]
- locs = [dict(zip(axes, (float(v) for v in s.split(',')))) for s in args.locations]
-
- model = VariationModel(locs)
- print("Sorted locations:")
- pprint(model.locations)
- print("Supports:")
- pprint(model.supports)
+ """Normalize locations on a given designspace"""
+ from fontTools import configLogger
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools varLib.models",
+ description=main.__doc__,
+ )
+ parser.add_argument(
+ "--loglevel",
+ metavar="LEVEL",
+ default="INFO",
+ help="Logging level (defaults to INFO)",
+ )
+
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument("-d", "--designspace", metavar="DESIGNSPACE", type=str)
+ group.add_argument(
+ "-l",
+ "--locations",
+ metavar="LOCATION",
+ nargs="+",
+ help="Master locations as comma-separate coordinates. One must be all zeros.",
+ )
+
+ args = parser.parse_args(args)
+
+ configLogger(level=args.loglevel)
+ from pprint import pprint
+
+ if args.designspace:
+ from fontTools.designspaceLib import DesignSpaceDocument
+
+ doc = DesignSpaceDocument()
+ doc.read(args.designspace)
+ locs = [s.location for s in doc.sources]
+ print("Original locations:")
+ pprint(locs)
+ doc.normalize()
+ print("Normalized locations:")
+ locs = [s.location for s in doc.sources]
+ pprint(locs)
+ else:
+ axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
+ locs = [
+ dict(zip(axes, (float(v) for v in s.split(",")))) for s in args.locations
+ ]
+
+ model = VariationModel(locs)
+ print("Sorted locations:")
+ pprint(model.locations)
+ print("Supports:")
+ pprint(model.supports)
+
if __name__ == "__main__":
- import doctest, sys
+ import doctest, sys
- if len(sys.argv) > 1:
- sys.exit(main())
+ if len(sys.argv) > 1:
+ sys.exit(main())
- sys.exit(doctest.testmod().failed)
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/varLib/mutator.py b/Lib/fontTools/varLib/mutator.py
index 02ce4422..263c4e61 100644
--- a/Lib/fontTools/varLib/mutator.py
+++ b/Lib/fontTools/varLib/mutator.py
@@ -138,7 +138,7 @@ def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
# Happens with non-marking glyphs
lsb_delta = 0
else:
- lsb = boundsPen.bounds[0]
+ lsb = otRound(boundsPen.bounds[0])
lsb_delta = entry[1] - lsb
if lsb_delta or width_delta:
@@ -185,6 +185,8 @@ def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
log.info("Mutating glyf/gvar tables")
gvar = varfont['gvar']
glyf = varfont['glyf']
+ hMetrics = varfont['hmtx'].metrics
+ vMetrics = getattr(varfont.get('vmtx'), 'metrics', None)
# get list of glyph names in gvar sorted by component depth
glyphnames = sorted(
gvar.variations.keys(),
@@ -194,7 +196,7 @@ def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
name))
for glyphname in glyphnames:
variations = gvar.variations[glyphname]
- coordinates, _ = glyf.getCoordinatesAndControls(glyphname, varfont)
+ coordinates, _ = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
origCoords, endPts = None, None
for var in variations:
scalar = supportScalar(loc, var.axes)
@@ -202,10 +204,10 @@ def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
delta = var.coordinates
if None in delta:
if origCoords is None:
- origCoords, g = glyf.getCoordinatesAndControls(glyphname, varfont)
+ origCoords, g = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
delta = iup_delta(delta, origCoords, g.endPts)
coordinates += GlyphCoordinates(delta) * scalar
- glyf.setCoordinates(glyphname, coordinates, varfont)
+ glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
else:
glyf = None
diff --git a/Lib/fontTools/varLib/varStore.py b/Lib/fontTools/varLib/varStore.py
index 8a382df0..bcf81b39 100644
--- a/Lib/fontTools/varLib/varStore.py
+++ b/Lib/fontTools/varLib/varStore.py
@@ -5,7 +5,6 @@ from fontTools.varLib.builder import (buildVarRegionList, buildVarStore,
buildVarRegion, buildVarData)
from functools import partial
from collections import defaultdict
-from array import array
def _getLocationKey(loc):
@@ -375,12 +374,11 @@ class _Encoding(object):
as a VarData."""
c = 6
while chars:
- if chars & 3:
+ if chars & 0b1111:
c += 2
- chars >>= 2
+ chars >>= 4
return c
-
def _find_yourself_best_new_encoding(self, done_by_width):
self.best_new_encoding = None
for new_width in range(self.width+1, self.width+self.room+1):
@@ -405,14 +403,31 @@ class _EncodingDict(dict):
@staticmethod
def _row_characteristics(row):
"""Returns encoding characteristics for a row."""
+ longWords = False
+
chars = 0
i = 1
for v in row:
if v:
chars += i
if not (-128 <= v <= 127):
- chars += i * 2
- i <<= 2
+ chars += i * 0b0010
+ if not (-32768 <= v <= 32767):
+ longWords = True
+ break
+ i <<= 4
+
+ if longWords:
+ # Redo; only allow 2byte/4byte encoding
+ chars = 0
+ i = 1
+ for v in row:
+ if v:
+ chars += i * 0b0011
+ if not (-32768 <= v <= 32767):
+ chars += i * 0b1100
+ i <<= 4
+
return chars
@@ -423,7 +438,7 @@ def VarStore_optimize(self):
# Check that no two VarRegions are the same; if they are, fold them.
n = len(self.VarRegionList.Region) # Number of columns
- zeroes = array('h', [0]*n)
+ zeroes = [0] * n
front_mapping = {} # Map from old VarIdxes to full row tuples
@@ -435,7 +450,7 @@ def VarStore_optimize(self):
for minor,item in enumerate(data.Item):
- row = array('h', zeroes)
+ row = list(zeroes)
for regionIdx,v in zip(regionIndices, item):
row[regionIdx] += v
row = tuple(row)