author     Haibo Huang <hhb@google.com>    2019-05-13 16:38:42 -0700
committer  Haibo Huang <hhb@google.com>    2019-05-13 16:38:42 -0700
commit     d648c2a4868c62db146c13f62b38d749db674ab2 (patch)
tree       cfeb5f4e886f8902407dc4c5bab3a88513d0fe55 /Lib
parent     195ed3228f2e5c0f6a9cc31e9c0f081646eda9eb (diff)
download   fonttools-d648c2a4868c62db146c13f62b38d749db674ab2.tar.gz
Upgrade fonttools to 3.41.2
Test: None
Change-Id: I5263a62cab851cde83c296124418c8e72afffe4e
Diffstat (limited to 'Lib')
-rw-r--r--   Lib/fontTools/__init__.py                 |   2
-rw-r--r--   Lib/fontTools/afmLib.py                   |  64
-rw-r--r--   Lib/fontTools/cffLib/__init__.py          |   7
-rw-r--r--   Lib/fontTools/cffLib/specializer.py       | 201
-rw-r--r--   Lib/fontTools/designspaceLib/__init__.py  |  54
-rw-r--r--   Lib/fontTools/feaLib/ast.py               |   6
-rw-r--r--   Lib/fontTools/feaLib/builder.py           |   4
-rw-r--r--   Lib/fontTools/fontBuilder.py              |   9
-rw-r--r--   Lib/fontTools/misc/arrayTools.py          |   6
-rw-r--r--   Lib/fontTools/misc/cliTools.py            |   2
-rw-r--r--   Lib/fontTools/misc/filenames.py           |   2
-rw-r--r--   Lib/fontTools/misc/psCharStrings.py       |  10
-rw-r--r--   Lib/fontTools/misc/sstruct.py             |  18
-rw-r--r--   Lib/fontTools/otlLib/builder.py           |  25
-rw-r--r--   Lib/fontTools/otlLib/maxContextCalc.py    | 101
-rw-r--r--   Lib/fontTools/subset/__init__.py          |  64
-rw-r--r--   Lib/fontTools/subset/cff.py               |   6
-rw-r--r--   Lib/fontTools/svgLib/path/__init__.py     |   8
-rw-r--r--   Lib/fontTools/svgLib/path/parser.py       |   2
-rw-r--r--   Lib/fontTools/svgLib/path/shapes.py       |  56
-rw-r--r--   Lib/fontTools/ttLib/tables/S__i_l_f.py    |  12
-rw-r--r--   Lib/fontTools/ttLib/tables/S__i_l_l.py    |   2
-rw-r--r--   Lib/fontTools/ttLib/tables/_g_l_y_f.py    |   8
-rw-r--r--   Lib/fontTools/ttLib/tables/_g_v_a_r.py    |  11
-rw-r--r--   Lib/fontTools/ttLib/tables/_h_e_a_d.py    |   3
-rw-r--r--   Lib/fontTools/ttLib/tables/otBase.py      |   1
-rw-r--r--   Lib/fontTools/ttLib/tables/otTables.py    |   3
-rw-r--r--   Lib/fontTools/ufoLib/filenames.py         |   2
-rw-r--r--   Lib/fontTools/varLib/__init__.py          | 177
-rw-r--r--   Lib/fontTools/varLib/builder.py           |   1
-rwxr-xr-x [-rw-r--r--]  Lib/fontTools/varLib/cff.py   | 533
-rw-r--r--   Lib/fontTools/varLib/mutator.py           |  31
-rw-r--r--   Lib/fontTools/varLib/varStore.py          |  21
-rw-r--r--   Lib/fontTools/voltLib/lexer.py            |   7
-rw-r--r--   Lib/fontTools/voltLib/parser.py           |  13
-rw-r--r--   Lib/fonttools.egg-info/PKG-INFO           |  88
-rw-r--r--   Lib/fonttools.egg-info/SOURCES.txt        |  40
37 files changed, 1194 insertions, 406 deletions
diff --git a/Lib/fontTools/__init__.py b/Lib/fontTools/__init__.py
index b0b99739..ad773e89 100644
--- a/Lib/fontTools/__init__.py
+++ b/Lib/fontTools/__init__.py
@@ -5,6 +5,6 @@ from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
-version = __version__ = "3.39.0"
+version = __version__ = "3.41.2"
__all__ = ["version", "log", "configLogger"]
diff --git a/Lib/fontTools/afmLib.py b/Lib/fontTools/afmLib.py
index e0ccafee..db01d346 100644
--- a/Lib/fontTools/afmLib.py
+++ b/Lib/fontTools/afmLib.py
@@ -9,52 +9,52 @@ from fontTools.misc.py23 import *
import re
# every single line starts with a "word"
-identifierRE = re.compile("^([A-Za-z]+).*")
+identifierRE = re.compile(r"^([A-Za-z]+).*")
# regular expression to parse char lines
charRE = re.compile(
- "(-?\d+)" # charnum
- "\s*;\s*WX\s+" # ; WX
- "(-?\d+)" # width
- "\s*;\s*N\s+" # ; N
- "([.A-Za-z0-9_]+)" # charname
- "\s*;\s*B\s+" # ; B
- "(-?\d+)" # left
- "\s+"
- "(-?\d+)" # bottom
- "\s+"
- "(-?\d+)" # right
- "\s+"
- "(-?\d+)" # top
- "\s*;\s*" # ;
+ r"(-?\d+)" # charnum
+ r"\s*;\s*WX\s+" # ; WX
+ r"(-?\d+)" # width
+ r"\s*;\s*N\s+" # ; N
+ r"([.A-Za-z0-9_]+)" # charname
+ r"\s*;\s*B\s+" # ; B
+ r"(-?\d+)" # left
+ r"\s+"
+ r"(-?\d+)" # bottom
+ r"\s+"
+ r"(-?\d+)" # right
+ r"\s+"
+ r"(-?\d+)" # top
+ r"\s*;\s*" # ;
)
# regular expression to parse kerning lines
kernRE = re.compile(
- "([.A-Za-z0-9_]+)" # leftchar
- "\s+"
- "([.A-Za-z0-9_]+)" # rightchar
- "\s+"
- "(-?\d+)" # value
- "\s*"
+ r"([.A-Za-z0-9_]+)" # leftchar
+ r"\s+"
+ r"([.A-Za-z0-9_]+)" # rightchar
+ r"\s+"
+ r"(-?\d+)" # value
+ r"\s*"
)
# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
- "([.A-Za-z0-9_]+)" # char name
- "\s+"
- "(\d+)" # number of parts
- "\s*;\s*"
+ r"([.A-Za-z0-9_]+)" # char name
+ r"\s+"
+ r"(\d+)" # number of parts
+ r"\s*;\s*"
)
componentRE = re.compile(
- "PCC\s+" # PPC
- "([.A-Za-z0-9_]+)" # base char name
- "\s+"
- "(-?\d+)" # x offset
- "\s+"
- "(-?\d+)" # y offset
- "\s*;\s*"
+ r"PCC\s+" # PPC
+ r"([.A-Za-z0-9_]+)" # base char name
+ r"\s+"
+ r"(-?\d+)" # x offset
+ r"\s+"
+ r"(-?\d+)" # y offset
+ r"\s*;\s*"
)
preferredAttributeOrder = [
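The afmLib change is purely mechanical: every pattern containing backslash escapes is respelled as a raw string, so sequences like \d reach the re module unchanged instead of triggering the invalid-escape DeprecationWarning that Python 3.6+ emits for ordinary strings. A minimal illustration, not code from the patch (the sample AFM fragment is made up):

    import re

    # With the r prefix the backslashes pass through to the regex engine;
    # the compiled pattern is the same as before, minus the warning.
    charnum = re.compile(r"(-?\d+)\s*;\s*WX\s+(-?\d+)")
    assert charnum.match("65 ; WX 722").groups() == ("65", "722")
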
diff --git a/Lib/fontTools/cffLib/__init__.py b/Lib/fontTools/cffLib/__init__.py
index c1750479..3c5f36df 100644
--- a/Lib/fontTools/cffLib/__init__.py
+++ b/Lib/fontTools/cffLib/__init__.py
@@ -207,6 +207,13 @@ class CFFFontSet(object):
continue
name, attrs, content = element
topDict.fromXML(name, attrs, content)
+
+ if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
+ fdArray = topDict.FDArray
+ for fontDict in fdArray:
+ if hasattr(fontDict, "Private"):
+ fontDict.Private.vstore = topDict.VarStore
+
elif name == "GlobalSubrs":
subrCharStringClass = psCharStrings.T2CharString
if not hasattr(self, "GlobalSubrs"):
diff --git a/Lib/fontTools/cffLib/specializer.py b/Lib/fontTools/cffLib/specializer.py
index caf8c3b3..db6e5f3d 100644
--- a/Lib/fontTools/cffLib/specializer.py
+++ b/Lib/fontTools/cffLib/specializer.py
@@ -4,6 +4,7 @@
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
+from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
@@ -26,14 +27,21 @@ def programToString(program):
return ' '.join(str(x) for x in program)
-def programToCommands(program):
+def programToCommands(program, getNumRegions=None):
"""Takes a T2CharString program list and returns list of commands.
Each command is a two-tuple of commandname,arg-list. The commandname might
be empty string if no commandname shall be emitted (used for glyph width,
hintmask/cntrmask argument, as well as stray arguments at the end of the
- program (¯\_(ツ)_/¯)."""
+ program (¯\_(ツ)_/¯).
+ 'getNumRegions' may be None, or a callable object. It must return the
+ number of regions. 'getNumRegions' takes a single argument, vsindex. If
+ the vsindex argument is None, getNumRegions returns the default number
+ of regions for the charstring, else it returns the numRegions for
+ the vsindex."""
width = None
+ seenWidthOp = False
+ vsIndex = None
commands = []
stack = []
it = iter(program)
@@ -42,10 +50,37 @@ def programToCommands(program):
stack.append(token)
continue
- if width is None and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm',
- 'cntrmask', 'hintmask',
- 'hmoveto', 'vmoveto', 'rmoveto',
- 'endchar'}:
+ if token == 'blend':
+ assert getNumRegions is not None
+ numSourceFonts = 1 + getNumRegions(vsIndex)
+ # replace the blend op args on the stack with a single list
+ # containing all the blend op args.
+ numBlendOps = stack[-1] * numSourceFonts + 1
+ # replace first blend op by a list of the blend ops.
+ stack[-numBlendOps:] = [stack[-numBlendOps:]]
+
+ # Check for width.
+ if not seenWidthOp:
+ seenWidthOp = True
+ widthLen = len(stack) - numBlendOps
+ if widthLen and (widthLen % 2):
+ stack.pop(0)
+ elif width is not None:
+ commands.pop(0)
+ width = None
+ # We do NOT add the width to the command list if a blend is seen:
+ # if a blend op exists, this is or will be a CFF2 charstring.
+ continue
+
+ elif token == 'vsindex':
+ vsIndex = stack[-1]
+ assert type(vsIndex) is int
+
+ elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm',
+ 'cntrmask', 'hintmask',
+ 'hmoveto', 'vmoveto', 'rmoveto',
+ 'endchar'}:
+ seenWidthOp = True
parity = token in {'hmoveto', 'vmoveto'}
if stack and (len(stack) % 2) ^ parity:
width = stack.pop(0)
@@ -64,11 +99,23 @@ def programToCommands(program):
return commands
+def _flattenBlendArgs(args):
+ token_list = []
+ for arg in args:
+ if isinstance(arg, list):
+ token_list.extend(arg)
+ token_list.append('blend')
+ else:
+ token_list.append(arg)
+ return token_list
+
def commandsToProgram(commands):
"""Takes a commands list as returned by programToCommands() and converts
it back to a T2CharString program list."""
program = []
for op,args in commands:
+ if any(isinstance(arg, list) for arg in args):
+ args = _flattenBlendArgs(args)
program.extend(args)
if op:
program.append(op)
@@ -203,11 +250,58 @@ class _GeneralizerDecombinerCommandsMap(object):
yield ('rlineto', args)
yield ('rrcurveto', last_args)
+def _convertBlendOpToArgs(blendList):
+ # args is list of blend op args. Since we are supporting
+ # recursive blend op calls, some of these args may also
+ # be a list of blend op args, and need to be converted before
+ # we convert the current list.
+ if any([isinstance(arg, list) for arg in blendList]):
+ args = [i for e in blendList for i in
+ (_convertBlendOpToArgs(e) if isinstance(e,list) else [e]) ]
+ else:
+ args = blendList
+
+ # We now know that blendList contains a blend op argument list, even if
+ # some of the args are lists that each contain a blend op argument list.
+ # Convert from:
+ # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
+ # to:
+ # [ [x0] + [delta tuple for x0],
+ # ...,
+ # [xn] + [delta tuple for xn] ]
+ numBlends = args[-1]
+ # Can't use args.pop() when the args are being used in a nested list
+ # comprehension. See calling context
+ args = args[:-1]
+
+ numRegions = len(args)//numBlends - 1
+ if not (numBlends*(numRegions + 1) == len(args)):
+ raise ValueError(blendList)
+
+ defaultArgs = [[arg] for arg in args[:numBlends]]
+ deltaArgs = args[numBlends:]
+ numDeltaValues = len(deltaArgs)
+ deltaList = [ deltaArgs[i:i + numRegions] for i in range(0, numDeltaValues, numRegions) ]
+ blend_args = [ a + b for a, b in zip(defaultArgs,deltaList)]
+ return blend_args
def generalizeCommands(commands, ignoreErrors=False):
result = []
mapping = _GeneralizerDecombinerCommandsMap
- for op,args in commands:
+ for op, args in commands:
+ # First, generalize any blend args in the arg list.
+ if any([isinstance(arg, list) for arg in args]):
+ try:
+ args = [n for arg in args for n in (_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg])]
+ except ValueError:
+ if ignoreErrors:
+ # Store op as data, such that consumers of commands do not have to
+ # deal with incorrect number of arguments.
+ result.append(('', args))
+ result.append(('', [op]))
+ else:
+ raise
+
func = getattr(mapping, op, None)
if not func:
result.append((op,args))
@@ -225,8 +319,8 @@ def generalizeCommands(commands, ignoreErrors=False):
raise
return result
-def generalizeProgram(program, **kwargs):
- return commandsToProgram(generalizeCommands(programToCommands(program), **kwargs))
+def generalizeProgram(program, getNumRegions=None, **kwargs):
+ return commandsToProgram(generalizeCommands(programToCommands(program, getNumRegions), **kwargs))
def _categorizeVector(v):
@@ -267,6 +361,70 @@ def _negateCategory(a):
assert a in '0r'
return a
+def _convertToBlendCmds(args):
+ # return a list of blend commands, and
+ # the remaining non-blended args, if any.
+ num_args = len(args)
+ stack_use = 0
+ new_args = []
+ i = 0
+ while i < num_args:
+ arg = args[i]
+ if not isinstance(arg, list):
+ new_args.append(arg)
+ i += 1
+ stack_use += 1
+ else:
+ prev_stack_use = stack_use
+ # The arg is a tuple of blend values.
+ # These are each (master 0,delta 1..delta n)
+ # Combine as many successive tuples as we can,
+ # up to the max stack limit.
+ num_sources = len(arg)
+ blendlist = [arg]
+ i += 1
+ stack_use += 1 + num_sources # 1 for the num_blends arg
+ while (i < num_args) and isinstance(args[i], list):
+ blendlist.append(args[i])
+ i += 1
+ stack_use += num_sources
+ if stack_use + num_sources > maxStackLimit:
+ # if we are here, max stack is the CFF2 max stack.
+ # I use the CFF2 max stack limit here rather than
+ # the 'maxstack' chosen by the client, as the default
+ # maxstack may have been used unintentionally. For all
+ # the other operators, this just produces a little less
+ # optimization, but here it puts a hard (and low) limit
+ # on the number of source fonts that can be used.
+ break
+ # blendList now contains as many single blend tuples as can be
+ # combined without exceeding the CFF2 stack limit.
+ num_blends = len(blendlist)
+ # append the 'num_blends' default font values
+ blend_args = []
+ for arg in blendlist:
+ blend_args.append(arg[0])
+ for arg in blendlist:
+ blend_args.extend(arg[1:])
+ blend_args.append(num_blends)
+ new_args.append(blend_args)
+ stack_use = prev_stack_use + num_blends
+
+ return new_args
+
+def _addArgs(a, b):
+ if isinstance(b, list):
+ if isinstance(a, list):
+ if len(a) != len(b):
+ raise ValueError()
+ return [_addArgs(va, vb) for va,vb in zip(a, b)]
+ else:
+ a, b = b, a
+ if isinstance(a, list):
+ return [_addArgs(a[0], b)] + a[1:]
+ return a + b
+
+
def specializeCommands(commands,
ignoreErrors=False,
generalizeFirst=True,
@@ -302,6 +460,8 @@ def specializeCommands(commands,
# I have convinced myself that this produces optimal bytecode (except for, possibly
# one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-)
# A dynamic-programming approach can do the same but would be significantly slower.
+ #
+ # 7. For any args which are blend lists, convert them to a blend command.
# 0. Generalize commands.
@@ -417,12 +577,18 @@ def specializeCommands(commands,
continue
# Merge adjacent hlineto's and vlineto's.
+ # In CFF2 charstrings from variable fonts, each
+ # arg item may be a list of blendable values, one from
+ # each source font.
if (i and op in {'hlineto', 'vlineto'} and
- (op == commands[i-1][0]) and
- (not isinstance(args[0], list))):
+ (op == commands[i-1][0])):
_, other_args = commands[i-1]
assert len(args) == 1 and len(other_args) == 1
- commands[i-1] = (op, [other_args[0]+args[0]])
+ try:
+ new_args = [_addArgs(args[0], other_args[0])]
+ except ValueError:
+ continue
+ commands[i-1] = (op, new_args)
del commands[i]
continue
@@ -534,10 +700,16 @@ def specializeCommands(commands,
commands[i] = op0+op1+'curveto', args
continue
+ # 7. For any series of args which are blend lists, convert the series to a single blend arg.
+ for i in range(len(commands)):
+ op, args = commands[i]
+ if any(isinstance(arg, list) for arg in args):
+ commands[i] = op, _convertToBlendCmds(args)
+
return commands
-def specializeProgram(program, **kwargs):
- return commandsToProgram(specializeCommands(programToCommands(program), **kwargs))
+def specializeProgram(program, getNumRegions=None, **kwargs):
+ return commandsToProgram(specializeCommands(programToCommands(program, getNumRegions), **kwargs))
if __name__ == '__main__':
@@ -554,4 +726,3 @@ if __name__ == '__main__':
assert program == program2
print("Generalized program:"); print(programToString(generalizeProgram(program)))
print("Specialized program:"); print(programToString(specializeProgram(program)))
-
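The new getNumRegions hook is what lets programToCommands() parse CFF2 charstrings, where a blend operator needs to know how many regions feed each value. A sketch of the round-trip, assuming only what the docstrings above state (the CFF2 call at the end is indicative, not taken from the patch):

    from fontTools.cffLib.specializer import programToCommands, commandsToProgram

    # Plain (non-variable) charstrings behave exactly as before;
    # getNumRegions is only consulted when a 'blend' operator is seen.
    program = [10, 10, 'rmoveto', 100, 0, 'rlineto', 0, 100, 'rlineto', 'endchar']
    commands = programToCommands(program)
    assert commandsToProgram(commands) == program

    # For a CFF2 charstring 'cs', pass its own region-count callback
    # (added to T2CharString in the psCharStrings.py hunk further down):
    #   commands = programToCommands(cs.program, getNumRegions=cs.getNumRegions)
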
diff --git a/Lib/fontTools/designspaceLib/__init__.py b/Lib/fontTools/designspaceLib/__init__.py
index 4c5be8b4..e40f5edf 100644
--- a/Lib/fontTools/designspaceLib/__init__.py
+++ b/Lib/fontTools/designspaceLib/__init__.py
@@ -755,7 +755,6 @@ class BaseDocReader(LogMixin):
axisObject.labelNames[lang] = tounicode(labelNameElement.text)
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
- self.documentObject.defaultLoc = self.axisDefaults
def readSources(self):
for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")):
@@ -998,7 +997,6 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
self.axes = []
self.rules = []
self.default = None # name of the default master
- self.defaultLoc = None
self.lib = {}
"""Custom data associated with the whole document."""
@@ -1188,10 +1186,7 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
# Convert the default location from user space to design space before comparing
# it against the SourceDescriptor locations (always in design space).
- default_location_design = {
- axis.name: axis.map_forward(self.defaultLoc[axis.name])
- for axis in self.axes
- }
+ default_location_design = self.newDefaultLocation()
for sourceDescriptor in self.sources:
if sourceDescriptor.location == default_location_design:
@@ -1267,3 +1262,50 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum))
newConditionSets.append(newConditions)
rule.conditionSets = newConditionSets
+
+ def loadSourceFonts(self, opener, **kwargs):
+ """Ensure SourceDescriptor.font attributes are loaded, and return list of fonts.
+
+ Takes a callable which initializes a new font object (e.g. TTFont, or
+ defcon.Font, etc.) from the SourceDescriptor.path, and sets the
+ SourceDescriptor.font attribute.
+ If the font attribute is already not None, it is not loaded again.
+ Fonts with the same path are only loaded once and shared among SourceDescriptors.
+
+ For example, to load UFO sources using defcon:
+
+ designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
+ designspace.loadSourceFonts(defcon.Font)
+
+ Or to load masters as FontTools binary fonts, including extra options:
+
+ designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False)
+
+ Args:
+ opener (Callable): takes one required positional argument, the source.path,
+ and an optional list of keyword arguments, and returns a new font object
+ loaded from the path.
+ **kwargs: extra options passed on to the opener function.
+
+ Returns:
+ List of font objects in the order they appear in the sources list.
+ """
+ # we load fonts with the same source.path only once
+ loaded = {}
+ fonts = []
+ for source in self.sources:
+ if source.font is not None: # font already loaded
+ fonts.append(source.font)
+ continue
+ if source.path in loaded:
+ source.font = loaded[source.path]
+ else:
+ if source.path is None:
+ raise DesignSpaceDocumentError(
+ "Designspace source '%s' has no 'path' attribute"
+ % (source.name or "<Unknown>")
+ )
+ source.font = opener(source.path, **kwargs)
+ loaded[source.path] = source.font
+ fonts.append(source.font)
+ return fonts
diff --git a/Lib/fontTools/feaLib/ast.py b/Lib/fontTools/feaLib/ast.py
index 39dc4bcf..1994fc08 100644
--- a/Lib/fontTools/feaLib/ast.py
+++ b/Lib/fontTools/feaLib/ast.py
@@ -1143,6 +1143,12 @@ class ValueRecord(Expression):
elif yAdvance is None and not vertical:
return str(xAdvance)
+ # Make any remaining None value 0 to avoid generating invalid records.
+ x = x or 0
+ y = y or 0
+ xAdvance = xAdvance or 0
+ yAdvance = yAdvance or 0
+
# Try format B, if possible.
if (xPlaDevice is None and yPlaDevice is None and
xAdvDevice is None and yAdvDevice is None):
diff --git a/Lib/fontTools/feaLib/builder.py b/Lib/fontTools/feaLib/builder.py
index 456ae3cc..8880acf1 100644
--- a/Lib/fontTools/feaLib/builder.py
+++ b/Lib/fontTools/feaLib/builder.py
@@ -7,6 +7,7 @@ from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.ast import FeatureFile
from fontTools.otlLib import builder as otl
+from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.ttLib import newTable, getTableModule
from fontTools.ttLib.tables import otBase, otTables
from collections import defaultdict, OrderedDict
@@ -137,6 +138,9 @@ class Builder(object):
fontTable.table = table
elif tag in self.font:
del self.font[tag]
+ if (any(tag in self.font for tag in ("GPOS", "GSUB")) and
+ "OS/2" in self.font):
+ self.font["OS/2"].usMaxContext = maxCtxFont(self.font)
if "GDEF" in tables:
gdef = self.buildGDEF()
if gdef:
diff --git a/Lib/fontTools/fontBuilder.py b/Lib/fontTools/fontBuilder.py
index 8854c164..ef14b045 100644
--- a/Lib/fontTools/fontBuilder.py
+++ b/Lib/fontTools/fontBuilder.py
@@ -21,6 +21,7 @@ that works:
fb.setupHorizontalHeader()
fb.setupNameTable(...)
fb.setupOS2()
+ fb.addOpenTypeFeatures(...)
fb.setupPost()
fb.save(...)
@@ -100,9 +101,10 @@ charString = pen.getCharString()
charStrings = {".notdef": charString, "A": charString, "a": charString, ".null": charString}
fb.setupCFF(nameStrings['psName'], {"FullName": nameStrings['psName']}, charStrings, {})
+lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()}
metrics = {}
for gn, advanceWidth in advanceWidths.items():
- metrics[gn] = (advanceWidth, 100) # XXX lsb from glyph
+ metrics[gn] = (advanceWidth, lsb[gn])
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=200)
@@ -298,7 +300,7 @@ _OS2Defaults = dict(
sCapHeight = 0,
usDefaultChar = 0, # .notdef
usBreakChar = 32, # space
- usMaxContext = 2, # just kerning
+ usMaxContext = 0,
usLowerOpticalPointSize = 0,
usUpperOpticalPointSize = 0,
)
@@ -690,8 +692,9 @@ class FontBuilder(object):
"""Create a new `post` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
+ isCFF2 = 'CFF2' in self.font
postTable = self._initTableWithValues("post", _postDefaults, values)
- if self.isTTF and keepGlyphNames:
+ if (self.isTTF or isCFF2) and keepGlyphNames:
postTable.formatType = 2.0
postTable.extraNames = []
postTable.mapping = {}
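The docstring example above now takes each glyph's left side bearing from its charstring bounds instead of the hard-coded 100. As a reminder of what that expression yields (a sketch; an empty charstring would return None and needs special-casing):

    # calcBounds(None) returns the charstring's bounds as (xMin, yMin, xMax, yMax);
    # xMin is what the hmtx entry uses as the left side bearing.
    bounds = charString.calcBounds(None)
    lsb = bounds[0] if bounds is not None else 0
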
diff --git a/Lib/fontTools/misc/arrayTools.py b/Lib/fontTools/misc/arrayTools.py
index f2cfac83..ed20230c 100644
--- a/Lib/fontTools/misc/arrayTools.py
+++ b/Lib/fontTools/misc/arrayTools.py
@@ -6,6 +6,7 @@
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
+from fontTools.misc.fixedTools import otRound
from numbers import Number
import math
import operator
@@ -20,10 +21,11 @@ def calcBounds(array):
ys = [y for x, y in array]
return min(xs), min(ys), max(xs), max(ys)
-def calcIntBounds(array):
+def calcIntBounds(array, round=otRound):
"""Return the integer bounding rectangle of a 2D points array as a
tuple: (xMin, yMin, xMax, yMax)
- Values are rounded to closest integer.
+ Values are rounded to closest integer towards +Infinity using otRound
+ function by default, unless an optional 'round' function is passed.
"""
return tuple(round(v) for v in calcBounds(array))
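A quick check of the new default, relying only on the documented otRound behaviour (halves round towards +Infinity):

    from fontTools.misc.arrayTools import calcIntBounds

    # otRound sends halves towards +Infinity: -0.5 -> 0 and 0.5 -> 1,
    # whereas Python 3's built-in round() maps both to 0.
    assert calcIntBounds([(-0.5, -0.5), (0.5, 0.5)]) == (0, 0, 1, 1)
    assert calcIntBounds([(-0.5, -0.5), (0.5, 0.5)], round=round) == (0, 0, 0, 0)
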
diff --git a/Lib/fontTools/misc/cliTools.py b/Lib/fontTools/misc/cliTools.py
index 59ac3be1..8420e3e7 100644
--- a/Lib/fontTools/misc/cliTools.py
+++ b/Lib/fontTools/misc/cliTools.py
@@ -5,7 +5,7 @@ import os
import re
-numberAddedRE = re.compile("#\d+$")
+numberAddedRE = re.compile(r"#\d+$")
def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False):
diff --git a/Lib/fontTools/misc/filenames.py b/Lib/fontTools/misc/filenames.py
index 6cf02e36..260ace4c 100644
--- a/Lib/fontTools/misc/filenames.py
+++ b/Lib/fontTools/misc/filenames.py
@@ -15,7 +15,7 @@ from __future__ import unicode_literals
from fontTools.misc.py23 import basestring, unicode
-illegalCharacters = "\" * + / : < > ? [ \ ] | \0".split(" ")
+illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")
illegalCharacters += [chr(i) for i in range(1, 32)]
illegalCharacters += [chr(0x7F)]
reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")
diff --git a/Lib/fontTools/misc/psCharStrings.py b/Lib/fontTools/misc/psCharStrings.py
index 7fc7a26f..a97ec96d 100644
--- a/Lib/fontTools/misc/psCharStrings.py
+++ b/Lib/fontTools/misc/psCharStrings.py
@@ -944,6 +944,16 @@ class T2CharString(object):
self.program = program
self.private = private
self.globalSubrs = globalSubrs if globalSubrs is not None else []
+ self._cur_vsindex = None
+
+ def getNumRegions(self, vsindex=None):
+ pd = self.private
+ assert(pd is not None)
+ if vsindex is not None:
+ self._cur_vsindex = vsindex
+ elif self._cur_vsindex is None:
+ self._cur_vsindex = pd.vsindex if hasattr(pd, 'vsindex') else 0
+ return pd.getNumRegions(self._cur_vsindex)
def __repr__(self):
if self.bytecode is None:
diff --git a/Lib/fontTools/misc/sstruct.py b/Lib/fontTools/misc/sstruct.py
index 528b5e02..6b7e783e 100644
--- a/Lib/fontTools/misc/sstruct.py
+++ b/Lib/fontTools/misc/sstruct.py
@@ -110,20 +110,20 @@ def calcsize(fmt):
# matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile(
- "\s*" # whitespace
- "([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
- "\s*:\s*" # whitespace : whitespace
- "([cbBhHiIlLqQfd]|[0-9]+[ps]|" # formatchar...
- "([0-9]+)\.([0-9]+)(F))" # ...formatchar
- "\s*" # whitespace
- "(#.*)?$" # [comment] + end of string
+ r"\s*" # whitespace
+ r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
+ r"\s*:\s*" # whitespace : whitespace
+ r"([cbBhHiIlLqQfd]|[0-9]+[ps]|" # formatchar...
+ r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
+ r"\s*" # whitespace
+ r"(#.*)?$" # [comment] + end of string
)
# matches the special struct fmt chars and 'x' (pad byte)
-_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$")
+_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
# matches an "empty" string, possibly containing whitespace and/or a comment
-_emptyRE = re.compile("\s*(#.*)?$")
+_emptyRE = re.compile(r"\s*(#.*)?$")
_fixedpointmappings = {
8: "b",
diff --git a/Lib/fontTools/otlLib/builder.py b/Lib/fontTools/otlLib/builder.py
index efe66d53..f8b3ce3b 100644
--- a/Lib/fontTools/otlLib/builder.py
+++ b/Lib/fontTools/otlLib/builder.py
@@ -408,7 +408,8 @@ def buildSinglePos(mapping, glyphMap):
# If a ValueRecord is shared between multiple glyphs, we generate
# a SinglePos format 1 subtable; that is the most compact form.
for key, glyphs in coverages.items():
- if len(glyphs) > 1:
+ # 5 ushorts is the length of introducing another sublookup
+ if len(glyphs) * _getSinglePosValueSize(key) > 5:
format1Mapping = {g: values[key] for g in glyphs}
result.append(buildSinglePosSubtable(format1Mapping, glyphMap))
handled.add(key)
@@ -419,17 +420,18 @@ def buildSinglePos(mapping, glyphMap):
for valueFormat, keys in masks.items():
f2 = [k for k in keys if k not in handled]
if len(f2) > 1:
- format2Mapping = {coverages[k][0]: values[k] for k in f2}
+ format2Mapping = {}
+ for k in f2:
+ format2Mapping.update((g, values[k]) for g in coverages[k])
result.append(buildSinglePosSubtable(format2Mapping, glyphMap))
handled.update(f2)
- # The remaining ValueRecords are singletons in the sense that
- # they are only used by a single glyph, and their valueFormat
- # is unique as well. We encode these in format 1 again.
+ # The remaining ValueRecords are only used by a few glyphs, normally
+ # one. We encode these in format 1 again.
for key, glyphs in coverages.items():
if key not in handled:
- assert len(glyphs) == 1, glyphs
- st = buildSinglePosSubtable({glyphs[0]: values[key]}, glyphMap)
+ for g in glyphs:
+ st = buildSinglePosSubtable({g: values[key]}, glyphMap)
result.append(st)
# When the OpenType layout engine traverses the subtables, it will
@@ -491,6 +493,15 @@ def _makeDeviceTuple(device):
return (device.DeltaFormat, device.StartSize, device.EndSize,
tuple(device.DeltaValue))
+def _getSinglePosValueSize(valueKey):
+ """Returns how many ushorts this valueKey (short form of ValueRecord) takes up"""
+ count = 0
+ for k in valueKey[1:]:
+ if hasattr(k[1], '__len__') and len(k[1]):
+ count += len(k[1][3]) + 3
+ else:
+ count += 1
+ return count
def buildValue(value):
self = ValueRecord()
diff --git a/Lib/fontTools/otlLib/maxContextCalc.py b/Lib/fontTools/otlLib/maxContextCalc.py
new file mode 100644
index 00000000..5659310f
--- /dev/null
+++ b/Lib/fontTools/otlLib/maxContextCalc.py
@@ -0,0 +1,101 @@
+from __future__ import print_function, division, absolute_import, unicode_literals
+
+__all__ = ['maxCtxFont']
+
+
+def maxCtxFont(font):
+ """Calculate the usMaxContext value for an entire font."""
+
+ maxCtx = 0
+ for tag in ('GSUB', 'GPOS'):
+ if tag not in font:
+ continue
+ table = font[tag].table
+ if not table.LookupList:
+ continue
+ for lookup in table.LookupList.Lookup:
+ for st in lookup.SubTable:
+ maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st)
+ return maxCtx
+
+
+def maxCtxSubtable(maxCtx, tag, lookupType, st):
+ """Calculate usMaxContext based on a single lookup table (and an existing
+ max value).
+ """
+
+ # single positioning, single / multiple substitution
+ if (tag == 'GPOS' and lookupType == 1) or (
+ tag == 'GSUB' and lookupType in (1, 2, 3)):
+ maxCtx = max(maxCtx, 1)
+
+ # pair positioning
+ elif tag == 'GPOS' and lookupType == 2:
+ maxCtx = max(maxCtx, 2)
+
+ # ligatures
+ elif tag == 'GSUB' and lookupType == 4:
+ for ligatures in st.ligatures.values():
+ for ligature in ligatures:
+ maxCtx = max(maxCtx, ligature.CompCount)
+
+ # context
+ elif (tag == 'GPOS' and lookupType == 7) or (
+ tag == 'GSUB' and lookupType == 5):
+ maxCtx = maxCtxContextualSubtable(
+ maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub')
+
+ # chained context
+ elif (tag == 'GPOS' and lookupType == 8) or (
+ tag == 'GSUB' and lookupType == 6):
+ maxCtx = maxCtxContextualSubtable(
+ maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub', 'Chain')
+
+ # extensions
+ elif (tag == 'GPOS' and lookupType == 9) or (
+ tag == 'GSUB' and lookupType == 7):
+ maxCtx = maxCtxSubtable(
+ maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
+
+ # reverse-chained context
+ elif tag == 'GSUB' and lookupType == 8:
+ maxCtx = maxCtxContextualRule(maxCtx, st, 'Reverse')
+
+ return maxCtx
+
+
+def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=''):
+ """Calculate usMaxContext based on a contextual feature subtable."""
+
+ if st.Format == 1:
+ for ruleset in getattr(st, '%s%sRuleSet' % (chain, ruleType)):
+ if ruleset is None:
+ continue
+ for rule in getattr(ruleset, '%s%sRule' % (chain, ruleType)):
+ if rule is None:
+ continue
+ maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
+
+ elif st.Format == 2:
+ for ruleset in getattr(st, '%s%sClassSet' % (chain, ruleType)):
+ if ruleset is None:
+ continue
+ for rule in getattr(ruleset, '%s%sClassRule' % (chain, ruleType)):
+ if rule is None:
+ continue
+ maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
+
+ elif st.Format == 3:
+ maxCtx = maxCtxContextualRule(maxCtx, st, chain)
+
+ return maxCtx
+
+
+def maxCtxContextualRule(maxCtx, st, chain):
+ """Calculate usMaxContext based on a contextual feature rule."""
+
+ if not chain:
+ return max(maxCtx, st.GlyphCount)
+ elif chain == 'Reverse':
+ return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount)
+ return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
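A minimal usage sketch for the new module (the font path is a placeholder):

    from fontTools.ttLib import TTFont
    from fontTools.otlLib.maxContextCalc import maxCtxFont

    font = TTFont("MyFont.ttf")                   # placeholder path
    font["OS/2"].usMaxContext = maxCtxFont(font)  # walks GSUB and GPOS lookups
    font.save("MyFont-maxctx.ttf")
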
diff --git a/Lib/fontTools/subset/__init__.py b/Lib/fontTools/subset/__init__.py
index 6e17cc33..efa06c4c 100644
--- a/Lib/fontTools/subset/__init__.py
+++ b/Lib/fontTools/subset/__init__.py
@@ -7,6 +7,7 @@ from fontTools.misc.py23 import *
from fontTools.misc.fixedTools import otRound
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
+from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.pens.basePen import NullPen
from fontTools.misc.loggingTools import Timer
from fontTools.subset.cff import *
@@ -322,6 +323,10 @@ Other font-specific options:
Update the 'OS/2 xAvgCharWidth' field after subsetting.
--no-recalc-average-width
Don't change the 'OS/2 xAvgCharWidth' field. [default]
+ --recalc-max-context
+ Update the 'OS/2 usMaxContext' field after subsetting.
+ --no-recalc-max-context
+ Don't change the 'OS/2 usMaxContext' field. [default]
--font-number=<number>
Select font number for TrueType Collection (.ttc/.otc), starting from 0.
@@ -1789,39 +1794,47 @@ def subset_glyphs(self, s):
self.glyphCount = len(self.variations)
return bool(self.variations)
+def _remap_index_map(s, varidx_map, table_map):
+ map_ = {k:varidx_map[v] for k,v in table_map.mapping.items()}
+ # Emptied glyphs are remapped to:
+ # if GID <= last retained GID, 0/0: delta set for 0/0 is expected to exist & zeros compress well
+ # if GID > last retained GID, major/minor of the last retained glyph: will be optimized out by table compiler
+ last_idx = varidx_map[table_map.mapping[s.last_retained_glyph]]
+ for g,i in s.reverseEmptiedGlyphMap.items():
+ map_[g] = last_idx if i > s.last_retained_order else 0
+ return map_
+
@_add_method(ttLib.getTableClass('HVAR'))
def subset_glyphs(self, s):
table = self.table
- # TODO Update for retain_gids
-
used = set()
+ advIdxes_ = set()
+ retainAdvMap = False
if table.AdvWidthMap:
- if not s.options.retain_gids:
- table.AdvWidthMap.mapping = _dict_subset(table.AdvWidthMap.mapping, s.glyphs)
+ table.AdvWidthMap.mapping = _dict_subset(table.AdvWidthMap.mapping, s.glyphs)
used.update(table.AdvWidthMap.mapping.values())
else:
- assert table.LsbMap is None and table.RsbMap is None, "File a bug."
used.update(s.reverseOrigGlyphMap.values())
+ advIdxes_ = used.copy()
+ retainAdvMap = s.options.retain_gids
if table.LsbMap:
- if not s.options.retain_gids:
- table.LsbMap.mapping = _dict_subset(table.LsbMap.mapping, s.glyphs)
+ table.LsbMap.mapping = _dict_subset(table.LsbMap.mapping, s.glyphs)
used.update(table.LsbMap.mapping.values())
if table.RsbMap:
- if not s.options.retain_gids:
- table.RsbMap.mapping = _dict_subset(table.RsbMap.mapping, s.glyphs)
+ table.RsbMap.mapping = _dict_subset(table.RsbMap.mapping, s.glyphs)
used.update(table.RsbMap.mapping.values())
- varidx_map = varStore.VarStore_subset_varidxes(table.VarStore, used)
+ varidx_map = varStore.VarStore_subset_varidxes(table.VarStore, used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_)
if table.AdvWidthMap:
- table.AdvWidthMap.mapping = {k:varidx_map[v] for k,v in table.AdvWidthMap.mapping.items()}
+ table.AdvWidthMap.mapping = _remap_index_map(s, varidx_map, table.AdvWidthMap)
if table.LsbMap:
- table.LsbMap.mapping = {k:varidx_map[v] for k,v in table.LsbMap.mapping.items()}
+ table.LsbMap.mapping = _remap_index_map(s, varidx_map, table.LsbMap)
if table.RsbMap:
- table.RsbMap.mapping = {k:varidx_map[v] for k,v in table.RsbMap.mapping.items()}
+ table.RsbMap.mapping = _remap_index_map(s, varidx_map, table.RsbMap)
# TODO Return emptiness...
return True
@@ -1831,13 +1844,17 @@ def subset_glyphs(self, s):
table = self.table
used = set()
+ advIdxes_ = set()
+ retainAdvMap = False
if table.AdvHeightMap:
table.AdvHeightMap.mapping = _dict_subset(table.AdvHeightMap.mapping, s.glyphs)
used.update(table.AdvHeightMap.mapping.values())
else:
- assert table.TsbMap is None and table.BsbMap is None and table.VOrgMap is None, "File a bug."
used.update(s.reverseOrigGlyphMap.values())
+ advIdxes_ = used.copy()
+ retainAdvMap = s.options.retain_gids
+
if table.TsbMap:
table.TsbMap.mapping = _dict_subset(table.TsbMap.mapping, s.glyphs)
used.update(table.TsbMap.mapping.values())
@@ -1848,16 +1865,16 @@ def subset_glyphs(self, s):
table.VOrgMap.mapping = _dict_subset(table.VOrgMap.mapping, s.glyphs)
used.update(table.VOrgMap.mapping.values())
- varidx_map = varStore.VarStore_subset_varidxes(table.VarStore, used)
+ varidx_map = varStore.VarStore_subset_varidxes(table.VarStore, used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_)
if table.AdvHeightMap:
- table.AdvHeightMap.mapping = {k:varidx_map[v] for k,v in table.AdvHeightMap.mapping.items()}
+ table.AdvHeightMap.mapping = _remap_index_map(s, varidx_map, table.AdvHeightMap)
if table.TsbMap:
- table.TsbMap.mapping = {k:varidx_map[v] for k,v in table.TsbMap.mapping.items()}
+ table.TsbMap.mapping = _remap_index_map(s, varidx_map, table.TsbMap)
if table.BsbMap:
- table.BsbMap.mapping = {k:varidx_map[v] for k,v in table.BsbMap.mapping.items()}
+ table.BsbMap.mapping = _remap_index_map(s, varidx_map, table.BsbMap)
if table.VOrgMap:
- table.VOrgMap.mapping = {k:varidx_map[v] for k,v in table.VOrgMap.mapping.items()}
+ table.VOrgMap.mapping = _remap_index_map(s, varidx_map, table.VOrgMap)
# TODO Return emptiness...
return True
@@ -2301,6 +2318,7 @@ class Options(object):
self.recalc_timestamp = False # Recalculate font modified timestamp
self.prune_unicode_ranges = True # Clear unused 'ulUnicodeRange' bits
self.recalc_average_width = False # update 'xAvgCharWidth'
+ self.recalc_max_context = False # update 'usMaxContext'
self.canonical_order = None # Order tables as recommended
self.flavor = None # May be 'woff' or 'woff2'
self.with_zopfli = False # use zopfli instead of zlib for WOFF 1.0
@@ -2561,6 +2579,9 @@ class Subsetter(object):
order = font.getReverseGlyphMap()
self.reverseOrigGlyphMap = {g:order[g] for g in self.glyphs_retained}
+ self.reverseEmptiedGlyphMap = {g:order[g] for g in self.glyphs_emptied}
+ self.last_retained_order = max(self.reverseOrigGlyphMap.values())
+ self.last_retained_glyph = font.getGlyphOrder()[self.last_retained_order]
log.info("Retaining %d glyphs", len(self.glyphs_retained))
@@ -2610,6 +2631,11 @@ class Subsetter(object):
if avg_width != font[tag].xAvgCharWidth:
font[tag].xAvgCharWidth = avg_width
log.info("%s xAvgCharWidth updated: %d", tag, avg_width)
+ if self.options.recalc_max_context:
+ max_context = maxCtxFont(font)
+ if max_context != font[tag].usMaxContext:
+ font[tag].usMaxContext = max_context
+ log.info("%s usMaxContext updated: %d", tag, max_context)
clazz = ttLib.getTableClass(tag)
if hasattr(clazz, 'prune_post_subset'):
with timer("prune '%s'" % tag):
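The usMaxContext recalculation mirrors the existing xAvgCharWidth option; from the Python API it is just another Options flag (a sketch with placeholder paths; the CLI equivalent is the new --recalc-max-context switch documented above):

    from fontTools import subset

    options = subset.Options()
    options.recalc_max_context = True             # new flag in this release
    font = subset.load_font("MyFont.ttf", options)
    subsetter = subset.Subsetter(options=options)
    subsetter.populate(text="Hamburgefonstiv")
    subsetter.subset(font)
    subset.save_font(font, "MyFont-subset.ttf", options)
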
diff --git a/Lib/fontTools/subset/cff.py b/Lib/fontTools/subset/cff.py
index 274071e5..fc29f182 100644
--- a/Lib/fontTools/subset/cff.py
+++ b/Lib/fontTools/subset/cff.py
@@ -366,7 +366,7 @@ class StopHintCountEvent(Exception):
class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
- stop_hintcount_ops = ("op_hstem", "op_vstem", "op_rmoveto", "op_hmoveto",
+ stop_hintcount_ops = ("op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto",
"op_vmoveto")
def __init__(self, localSubrs, globalSubrs, private=None):
@@ -379,6 +379,10 @@ class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
setattr(self, op_name, self.stop_hint_count)
if hasattr(charString, '_desubroutinized'):
+ # If a charstring has already been desubroutinized, we will still
+ # need to execute it if we need to count hints in order to
+ # compute the byte length for mask arguments, and haven't finished
+ # counting hints pairs.
if self.need_hintcount and self.callingStack:
try:
psCharStrings.SimpleT2Decompiler.execute(self, charString)
diff --git a/Lib/fontTools/svgLib/path/__init__.py b/Lib/fontTools/svgLib/path/__init__.py
index 017ff57e..5dd3329c 100644
--- a/Lib/fontTools/svgLib/path/__init__.py
+++ b/Lib/fontTools/svgLib/path/__init__.py
@@ -55,6 +55,10 @@ class SVGPath(object):
# xpath | doesn't seem to reliable work so just walk it
for el in self.root.iter():
pb.add_path_from_element(el)
- for path in pb.paths:
+ original_pen = pen
+ for path, transform in zip(pb.paths, pb.transforms):
+ if transform:
+ pen = TransformPen(original_pen, transform)
+ else:
+ pen = original_pen
parse_path(path, pen)
-
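With the transform plumbing above, path elements carrying a matrix() transform are replayed through a TransformPen. A hedged usage sketch (the SVG file name is a placeholder):

    from fontTools.svgLib import SVGPath
    from fontTools.pens.recordingPen import RecordingPen

    pen = RecordingPen()
    # Elements with transform="matrix(a b c d e f)" are wrapped in a
    # TransformPen before their path data reaches 'pen'.
    SVGPath("shape.svg").draw(pen)
    print(pen.value)
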
diff --git a/Lib/fontTools/svgLib/path/parser.py b/Lib/fontTools/svgLib/path/parser.py
index ae0aba39..bdf3de0c 100644
--- a/Lib/fontTools/svgLib/path/parser.py
+++ b/Lib/fontTools/svgLib/path/parser.py
@@ -18,7 +18,7 @@ COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
-FLOAT_RE = re.compile("[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
+FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def _tokenize_path(pathdef):
diff --git a/Lib/fontTools/svgLib/path/shapes.py b/Lib/fontTools/svgLib/path/shapes.py
index a83274e4..4cc633ad 100644
--- a/Lib/fontTools/svgLib/path/shapes.py
+++ b/Lib/fontTools/svgLib/path/shapes.py
@@ -1,3 +1,6 @@
+import re
+
+
def _prefer_non_zero(*args):
for arg in args:
if arg != 0:
@@ -16,12 +19,28 @@ def _strip_xml_ns(tag):
return tag.split('}', 1)[1] if '}' in tag else tag
+def _transform(raw_value):
+ # TODO assumes a 'matrix' transform.
+ # No other transform functions are supported at the moment.
+ # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
+ # start simple: if you aren't exactly matrix(...) then no love
+ match = re.match(r'matrix\((.*)\)', raw_value)
+ if not match:
+ raise NotImplementedError
+ matrix = tuple(float(p) for p in re.split(r'\s+|,', match.group(1)))
+ if len(matrix) != 6:
+ raise ValueError('wrong # of terms in %s' % raw_value)
+ return matrix
+
+
class PathBuilder(object):
def __init__(self):
self.paths = []
+ self.transforms = []
def _start_path(self, initial_path=''):
self.paths.append(initial_path)
+ self.transforms.append(None)
def _end_path(self):
self._add('z')
@@ -68,6 +87,25 @@ class PathBuilder(object):
def v(self, y):
self._vhline('v', y)
+ def _line(self, c, x, y):
+ self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
+
+ def L(self, x, y):
+ self._line('L', x, y)
+
+ def l(self, x, y):
+ self._line('l', x, y)
+
+ def _parse_line(self, line):
+ x1 = float(line.attrib.get('x1', 0))
+ y1 = float(line.attrib.get('y1', 0))
+ x2 = float(line.attrib.get('x2', 0))
+ y2 = float(line.attrib.get('y2', 0))
+
+ self._start_path()
+ self.M(x1, y1)
+ self.L(x2, y2)
+
def _parse_rect(self, rect):
x = float(rect.attrib.get('x', 0))
y = float(rect.attrib.get('y', 0))
@@ -105,6 +143,10 @@ class PathBuilder(object):
self._start_path('M' + poly.attrib['points'])
self._end_path()
+ def _parse_polyline(self, poly):
+ if 'points' in poly.attrib:
+ self._start_path('M' + poly.attrib['points'])
+
def _parse_circle(self, circle):
cx = float(circle.attrib.get('cx', 0))
cy = float(circle.attrib.get('cy', 0))
@@ -116,10 +158,24 @@ class PathBuilder(object):
self.A(r, r, cx + r, cy, large_arc=1)
self.A(r, r, cx - r, cy, large_arc=1)
+ def _parse_ellipse(self, ellipse):
+ cx = float(ellipse.attrib.get('cx', 0))
+ cy = float(ellipse.attrib.get('cy', 0))
+ rx = float(ellipse.attrib.get('rx'))
+ ry = float(ellipse.attrib.get('ry'))
+
+ # arc doesn't seem to like being a complete shape, draw two halves
+ self._start_path()
+ self.M(cx - rx, cy)
+ self.A(rx, ry, cx + rx, cy, large_arc=1)
+ self.A(rx, ry, cx - rx, cy, large_arc=1)
+
def add_path_from_element(self, el):
tag = _strip_xml_ns(el.tag)
parse_fn = getattr(self, '_parse_%s' % tag.lower(), None)
if not callable(parse_fn):
return False
parse_fn(el)
+ if 'transform' in el.attrib:
+ self.transforms[-1] = _transform(el.attrib['transform'])
return True
diff --git a/Lib/fontTools/ttLib/tables/S__i_l_f.py b/Lib/fontTools/ttLib/tables/S__i_l_f.py
index e68b9b2e..00d5f616 100644
--- a/Lib/fontTools/ttLib/tables/S__i_l_f.py
+++ b/Lib/fontTools/ttLib/tables/S__i_l_f.py
@@ -82,6 +82,12 @@ Silf_pseudomap_format = '''
nPseudo: H
'''
+Silf_pseudomap_format_h = '''
+ >
+ unicode: H
+ nPseudo: H
+'''
+
Silf_classmap_format = '''
>
numClass: H
@@ -219,7 +225,7 @@ def disassemble(aCode):
pc += struct.calcsize(fmt)
return res
-instre = re.compile("^\s*([^(]+)\s*(?:\(([^)]+)\))?")
+instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
def assemble(instrs):
res = b""
for inst in instrs:
@@ -231,7 +237,7 @@ def assemble(instrs):
if m.group(2):
if parmfmt == 0:
continue
- parms = [int(x) for x in re.split(",\s*", m.group(2))]
+ parms = [int(x) for x in re.split(r",\s*", m.group(2))]
if parmfmt == -1:
l = len(parms)
res += struct.pack(("%dB" % (l+1)), l, *parms)
@@ -406,7 +412,7 @@ class Silf(object):
if version >= 3.0:
pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object())
else:
- pseudo = struct.unpack('>HH', data[8+4*i:12+4*i], _Object())
+ pseudo = sstruct.unpack(Silf_pseudomap_format_h, data[8+4*i:12+4*i], _Object())
self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
data = data[8 + 6 * numPseudo:]
currpos = (sstruct.calcsize(Silf_part1_format)
diff --git a/Lib/fontTools/ttLib/tables/S__i_l_l.py b/Lib/fontTools/ttLib/tables/S__i_l_l.py
index 4671e137..bf9d83f5 100644
--- a/Lib/fontTools/ttLib/tables/S__i_l_l.py
+++ b/Lib/fontTools/ttLib/tables/S__i_l_l.py
@@ -28,7 +28,7 @@ class table_S__i_l_l(DefaultTable.DefaultTable):
data[i * 8:(i+1) * 8])
offset = int(offset / 8) - (numLangs + 1)
langcode = langcode.replace(b'\000', b'')
- langinfo.append((langcode, numsettings, offset))
+ langinfo.append((langcode.decode("utf-8"), numsettings, offset))
maxsetting = max(maxsetting, offset + numsettings)
data = data[numLangs * 8:]
finfo = []
diff --git a/Lib/fontTools/ttLib/tables/_g_l_y_f.py b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
index b8020ca7..83d5315b 100644
--- a/Lib/fontTools/ttLib/tables/_g_l_y_f.py
+++ b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
@@ -917,8 +917,12 @@ class Glyph(object):
expanding it."""
if not hasattr(self, "data"):
if remove_hinting:
- self.program = ttProgram.Program()
- self.program.fromBytecode([])
+ if self.isComposite():
+ if hasattr(self, "program"):
+ del self.program
+ else:
+ self.program = ttProgram.Program()
+ self.program.fromBytecode([])
# No padding to trim.
return
if not self.data:
diff --git a/Lib/fontTools/ttLib/tables/_g_v_a_r.py b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
index 608b6a2d..f9c9e528 100644
--- a/Lib/fontTools/ttLib/tables/_g_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
@@ -107,8 +107,15 @@ class table__g_v_a_r(DefaultTable.DefaultTable):
glyph = ttFont["glyf"][glyphName]
numPointsInGlyph = self.getNumPoints_(glyph)
gvarData = data[offsetToData + offsets[i] : offsetToData + offsets[i + 1]]
- self.variations[glyphName] = decompileGlyph_(
- numPointsInGlyph, sharedCoords, axisTags, gvarData)
+ try:
+ self.variations[glyphName] = decompileGlyph_(
+ numPointsInGlyph, sharedCoords, axisTags, gvarData)
+ except Exception:
+ log.error(
+ "Failed to decompile deltas for glyph '%s' (%d points)",
+ glyphName, numPointsInGlyph,
+ )
+ raise
@staticmethod
def decompileOffsets_(data, tableFormat, glyphCount):
diff --git a/Lib/fontTools/ttLib/tables/_h_e_a_d.py b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
index 4235acf4..42fbb1d4 100644
--- a/Lib/fontTools/ttLib/tables/_h_e_a_d.py
+++ b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
@@ -4,6 +4,7 @@ from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
+from fontTools.misc.arrayTools import intRect
from . import DefaultTable
import logging
@@ -63,7 +64,7 @@ class table__h_e_a_d(DefaultTable.DefaultTable):
# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
if 'CFF ' in ttFont:
topDict = ttFont['CFF '].cff.topDictIndex[0]
- self.xMin, self.yMin, self.xMax, self.yMax = topDict.FontBBox
+ self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
if ttFont.recalcTimestamp:
self.modified = timestampNow()
data = sstruct.pack(headFormat, self)
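intRect widens a fractional box to the smallest enclosing integer rectangle, which is what the head fields require (a small check of the assumed floor/ceil behaviour):

    from fontTools.misc.arrayTools import intRect

    # Minima are floored, maxima are ceiled, so a fractional CFF FontBBox
    # always ends up fully contained in the integer head bbox.
    assert intRect((-10.5, -200.2, 1000.1, 900.0)) == (-11, -201, 1001, 900)
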
diff --git a/Lib/fontTools/ttLib/tables/otBase.py b/Lib/fontTools/ttLib/tables/otBase.py
index 81ab354e..816f1cd0 100644
--- a/Lib/fontTools/ttLib/tables/otBase.py
+++ b/Lib/fontTools/ttLib/tables/otBase.py
@@ -104,6 +104,7 @@ class BaseTTXConverter(DefaultTable):
tableClass = getattr(otTables, self.tableTag)
self.table = tableClass()
self.table.fromXML(name, attrs, content, font)
+ self.table.populateDefaults()
class OTTableReader(object):
diff --git a/Lib/fontTools/ttLib/tables/otTables.py b/Lib/fontTools/ttLib/tables/otTables.py
index af2a7c6c..737e5615 100644
--- a/Lib/fontTools/ttLib/tables/otTables.py
+++ b/Lib/fontTools/ttLib/tables/otTables.py
@@ -1130,6 +1130,7 @@ class LigatureSubst(FormatSwitchingBaseTable):
lig.LigGlyph = attrs["glyph"]
components = attrs["components"]
lig.Component = components.split(",") if components else []
+ lig.CompCount = len(lig.Component)
ligs.append(lig)
@@ -1495,7 +1496,7 @@ def _buildClasses():
import re
from .otData import otData
- formatPat = re.compile("([A-Za-z0-9]+)Format(\d+)$")
+ formatPat = re.compile(r"([A-Za-z0-9]+)Format(\d+)$")
namespace = globals()
# populate module with classes
diff --git a/Lib/fontTools/ufoLib/filenames.py b/Lib/fontTools/ufoLib/filenames.py
index 98f53b1f..1c5630f0 100644
--- a/Lib/fontTools/ufoLib/filenames.py
+++ b/Lib/fontTools/ufoLib/filenames.py
@@ -6,7 +6,7 @@ from __future__ import absolute_import, unicode_literals
from fontTools.misc.py23 import basestring, unicode
-illegalCharacters = "\" * + / : < > ? [ \ ] | \0".split(" ")
+illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")
illegalCharacters += [chr(i) for i in range(1, 32)]
illegalCharacters += [chr(0x7F)]
reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")
diff --git a/Lib/fontTools/varLib/__init__.py b/Lib/fontTools/varLib/__init__.py
index 0543ee37..db567062 100644
--- a/Lib/fontTools/varLib/__init__.py
+++ b/Lib/fontTools/varLib/__init__.py
@@ -463,46 +463,120 @@ def _merge_TTHinting(font, masterModel, master_ttfs, tolerance=0.5):
var = TupleVariation(support, delta)
cvar.variations.append(var)
+MetricsFields = namedtuple('MetricsFields',
+ ['tableTag', 'metricsTag', 'sb1', 'sb2', 'advMapping', 'vOrigMapping'])
+
+hvarFields = MetricsFields(tableTag='HVAR', metricsTag='hmtx', sb1='LsbMap',
+ sb2='RsbMap', advMapping='AdvWidthMap', vOrigMapping=None)
+
+vvarFields = MetricsFields(tableTag='VVAR', metricsTag='vmtx', sb1='TsbMap',
+ sb2='BsbMap', advMapping='AdvHeightMap', vOrigMapping='VOrgMap')
+
def _add_HVAR(font, masterModel, master_ttfs, axisTags):
+ _add_VHVAR(font, masterModel, master_ttfs, axisTags, hvarFields)
+
+def _add_VVAR(font, masterModel, master_ttfs, axisTags):
+ _add_VHVAR(font, masterModel, master_ttfs, axisTags, vvarFields)
- log.info("Generating HVAR")
+def _add_VHVAR(font, masterModel, master_ttfs, axisTags, tableFields):
+
+ tableTag = tableFields.tableTag
+ assert tableTag not in font
+ log.info("Generating " + tableTag)
+ VHVAR = newTable(tableTag)
+ tableClass = getattr(ot, tableTag)
+ vhvar = VHVAR.table = tableClass()
+ vhvar.Version = 0x00010000
glyphOrder = font.getGlyphOrder()
- hAdvanceDeltasAndSupports = {}
- metricses = [m["hmtx"].metrics for m in master_ttfs]
+ # Build list of source font advance widths for each glyph
+ metricsTag = tableFields.metricsTag
+ advMetricses = [m[metricsTag].metrics for m in master_ttfs]
+
+ # Build list of source font vertical origin coords for each glyph
+ if tableTag == 'VVAR' and 'VORG' in master_ttfs[0]:
+ vOrigMetricses = [m['VORG'].VOriginRecords for m in master_ttfs]
+ defaultYOrigs = [m['VORG'].defaultVertOriginY for m in master_ttfs]
+ vOrigMetricses = list(zip(vOrigMetricses, defaultYOrigs))
+ else:
+ vOrigMetricses = None
+
+ metricsStore, advanceMapping, vOrigMapping = _get_advance_metrics(font,
+ masterModel, master_ttfs, axisTags, glyphOrder, advMetricses,
+ vOrigMetricses)
+
+ vhvar.VarStore = metricsStore
+ if advanceMapping is None:
+ setattr(vhvar, tableFields.advMapping, None)
+ else:
+ setattr(vhvar, tableFields.advMapping, advanceMapping)
+ if vOrigMapping is not None:
+ setattr(vhvar, tableFields.vOrigMapping, vOrigMapping)
+ setattr(vhvar, tableFields.sb1, None)
+ setattr(vhvar, tableFields.sb2, None)
+
+ font[tableTag] = VHVAR
+ return
+
+def _get_advance_metrics(font, masterModel, master_ttfs,
+ axisTags, glyphOrder, advMetricses, vOrigMetricses=None):
+
+ vhAdvanceDeltasAndSupports = {}
+ vOrigDeltasAndSupports = {}
for glyph in glyphOrder:
- hAdvances = [metrics[glyph][0] if glyph in metrics else None for metrics in metricses]
- hAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(hAdvances)
+ vhAdvances = [metrics[glyph][0] if glyph in metrics else None for metrics in advMetricses]
+ vhAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(vhAdvances)
- singleModel = models.allEqual(id(v[1]) for v in hAdvanceDeltasAndSupports.values())
+ singleModel = models.allEqual(id(v[1]) for v in vhAdvanceDeltasAndSupports.values())
+
+ if vOrigMetricses:
+ singleModel = False
+ for glyph in glyphOrder:
+ # We need to supply a vOrigs tuple with non-None default values
+ # for each glyph. vOrigMetricses contains values only for those
+ # glyphs which have a non-default vOrig.
+ vOrigs = [metrics[glyph] if glyph in metrics else defaultVOrig
+ for metrics, defaultVOrig in vOrigMetricses]
+ vOrigDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(vOrigs)
directStore = None
if singleModel:
# Build direct mapping
-
- supports = next(iter(hAdvanceDeltasAndSupports.values()))[1][1:]
+ supports = next(iter(vhAdvanceDeltasAndSupports.values()))[1][1:]
varTupleList = builder.buildVarRegionList(supports, axisTags)
varTupleIndexes = list(range(len(supports)))
varData = builder.buildVarData(varTupleIndexes, [], optimize=False)
for glyphName in glyphOrder:
- varData.addItem(hAdvanceDeltasAndSupports[glyphName][0])
+ varData.addItem(vhAdvanceDeltasAndSupports[glyphName][0])
varData.optimize()
directStore = builder.buildVarStore(varTupleList, [varData])
# Build optimized indirect mapping
storeBuilder = varStore.OnlineVarStoreBuilder(axisTags)
- mapping = {}
+ advMapping = {}
for glyphName in glyphOrder:
- deltas,supports = hAdvanceDeltasAndSupports[glyphName]
+ deltas, supports = vhAdvanceDeltasAndSupports[glyphName]
storeBuilder.setSupports(supports)
- mapping[glyphName] = storeBuilder.storeDeltas(deltas)
+ advMapping[glyphName] = storeBuilder.storeDeltas(deltas)
+
+ if vOrigMetricses:
+ vOrigMap = {}
+ for glyphName in glyphOrder:
+ deltas, supports = vOrigDeltasAndSupports[glyphName]
+ storeBuilder.setSupports(supports)
+ vOrigMap[glyphName] = storeBuilder.storeDeltas(deltas)
+
indirectStore = storeBuilder.finish()
mapping2 = indirectStore.optimize()
- mapping = [mapping2[mapping[g]] for g in glyphOrder]
- advanceMapping = builder.buildVarIdxMap(mapping, glyphOrder)
+ advMapping = [mapping2[advMapping[g]] for g in glyphOrder]
+ advanceMapping = builder.buildVarIdxMap(advMapping, glyphOrder)
+
+ if vOrigMetricses:
+ vOrigMap = [mapping2[vOrigMap[g]] for g in glyphOrder]
- use_direct = False
+ useDirect = False
+ vOrigMapping = None
if directStore:
# Compile both, see which is more compact
@@ -515,20 +589,17 @@ def _add_HVAR(font, masterModel, master_ttfs, axisTags):
advanceMapping.compile(writer, font)
indirectSize = len(writer.getAllData())
- use_direct = directSize < indirectSize
-
- # Done; put it all together.
- assert "HVAR" not in font
- HVAR = font["HVAR"] = newTable('HVAR')
- hvar = HVAR.table = ot.HVAR()
- hvar.Version = 0x00010000
- hvar.LsbMap = hvar.RsbMap = None
- if use_direct:
- hvar.VarStore = directStore
- hvar.AdvWidthMap = None
+ useDirect = directSize < indirectSize
+
+ if useDirect:
+ metricsStore = directStore
+ advanceMapping = None
else:
- hvar.VarStore = indirectStore
- hvar.AdvWidthMap = advanceMapping
+ metricsStore = indirectStore
+ if vOrigMetricses:
+ vOrigMapping = builder.buildVarIdxMap(vOrigMap, glyphOrder)
+
+ return metricsStore, advanceMapping, vOrigMapping
def _add_MVAR(font, masterModel, master_ttfs, axisTags):
@@ -688,12 +759,11 @@ _DesignSpaceData = namedtuple(
def _add_CFF2(varFont, model, master_fonts):
- from .cff import (convertCFFtoCFF2, addCFFVarStore, merge_region_fonts)
+ from .cff import (convertCFFtoCFF2, merge_region_fonts)
glyphOrder = varFont.getGlyphOrder()
convertCFFtoCFF2(varFont)
ordered_fonts_list = model.reorderMasters(master_fonts, model.reverseMapping)
# re-ordering the master list simplifies building the CFF2 data item lists.
- addCFFVarStore(varFont, model) # Add VarStore to the CFF2 font.
merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder)
@@ -840,6 +910,8 @@ def build(designspace, master_finder=lambda s:s, exclude=[], optimize=True):
_add_MVAR(vf, model, master_fonts, axisTags)
if 'HVAR' not in exclude:
_add_HVAR(vf, model, master_fonts, axisTags)
+ if 'VVAR' not in exclude and 'vmtx' in vf:
+ _add_VVAR(vf, model, master_fonts, axisTags)
if 'GDEF' not in exclude or 'GPOS' not in exclude:
_merge_OTL(vf, model, master_fonts, axisTags)
if 'gvar' not in exclude and 'glyf' in vf:
@@ -850,6 +922,13 @@ def build(designspace, master_finder=lambda s:s, exclude=[], optimize=True):
_add_GSUB_feature_variations(vf, ds.axes, ds.internal_axis_supports, ds.rules)
if 'CFF2' not in exclude and 'CFF ' in vf:
_add_CFF2(vf, model, master_fonts)
+ if "post" in vf:
+ # set 'post' to format 2 to keep the glyph names dropped from CFF2
+ post = vf["post"]
+ if post.formatType != 2.0:
+ post.formatType = 2.0
+ post.extraNames = []
+ post.mapping = {}
for tag in exclude:
if tag in vf:
@@ -859,7 +938,7 @@ def build(designspace, master_finder=lambda s:s, exclude=[], optimize=True):
return vf, model, master_ttfs
-def _open_font(path, master_finder):
+def _open_font(path, master_finder=lambda s: s):
# load TTFont masters from given 'path': this can be either a .TTX or an
# OpenType binary font; or if neither of these, try use the 'master_finder'
# callable to resolve the path to a valid .TTX or OpenType font binary.
@@ -893,35 +972,17 @@ def load_masters(designspace, master_finder=lambda s: s):
Return list of master TTFont objects in the same order they are listed in the
DesignSpaceDocument.
"""
- master_fonts = []
-
for master in designspace.sources:
- # 1. If the caller already supplies a TTFont for a source, just take it.
- if master.font:
- font = master.font
- master_fonts.append(font)
- else:
- # If a SourceDescriptor has a layer name, demand that the compiled TTFont
- # be supplied by the caller. This spares us from modifying MasterFinder.
- if master.layerName:
- raise AttributeError(
- "Designspace source '%s' specified a layer name but lacks the "
- "required TTFont object in the 'font' attribute."
- % (master.name or "<Unknown>")
+ # If a SourceDescriptor has a layer name, demand that the compiled TTFont
+ # be supplied by the caller. This spares us from modifying MasterFinder.
+ if master.layerName and master.font is None:
+ raise AttributeError(
+ "Designspace source '%s' specified a layer name but lacks the "
+ "required TTFont object in the 'font' attribute."
+ % (master.name or "<Unknown>")
)
- else:
- if master.path is None:
- raise AttributeError(
- "Designspace source '%s' has neither 'font' nor 'path' "
- "attributes" % (master.name or "<Unknown>")
- )
- # 2. A SourceDescriptor's path might point an OpenType binary, a
- # TTX file, or another source file (e.g. UFO), in which case we
- # resolve the path using 'master_finder' function
- master.font = font = _open_font(master.path, master_finder)
- master_fonts.append(font)
-
- return master_fonts
+
+ return designspace.loadSourceFonts(_open_font, master_finder=master_finder)
class MasterFinder(object):
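load_masters() now defers to the designspace document itself. A hedged sketch of that call, assuming the loadSourceFonts() method referenced in the hunk above; the opener and the .designspace path are placeholders:

    from fontTools.designspaceLib import DesignSpaceDocument
    from fontTools.ttLib import TTFont

    def open_binary(path):
        # Trivial opener standing in for _open_font; keyword arguments passed
        # to loadSourceFonts() would be forwarded here.
        return TTFont(path)

    ds = DesignSpaceDocument.fromfile("MyFamily.designspace")
    master_fonts = ds.loadSourceFonts(open_binary)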
diff --git a/Lib/fontTools/varLib/builder.py b/Lib/fontTools/varLib/builder.py
index e923b800..43fb92a1 100644
--- a/Lib/fontTools/varLib/builder.py
+++ b/Lib/fontTools/varLib/builder.py
@@ -15,7 +15,6 @@ def buildVarRegion(support, axisTags):
self.VarRegionAxis = []
for tag in axisTags:
self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0,0,0))))
- self.VarRegionAxisCount = len(self.VarRegionAxis)
return self
def buildVarRegionList(supports, axisTags):
diff --git a/Lib/fontTools/varLib/cff.py b/Lib/fontTools/varLib/cff.py
index a000dd48..a8da2145 100644..100755
--- a/Lib/fontTools/varLib/cff.py
+++ b/Lib/fontTools/varLib/cff.py
@@ -1,7 +1,5 @@
+from collections import namedtuple
import os
-from fontTools.misc.py23 import BytesIO
-from fontTools.misc.psCharStrings import T2CharString, T2OutlineExtractor
-from fontTools.pens.t2CharStringPen import T2CharStringPen, t2c_round
from fontTools.cffLib import (
maxStackLimit,
TopDictIndex,
@@ -14,20 +12,21 @@ from fontTools.cffLib import (
FontDict,
VarStoreData
)
-from fontTools.cffLib.specializer import (commandsToProgram, specializeCommands)
+from fontTools.misc.py23 import BytesIO
+from fontTools.cffLib.specializer import (
+ specializeCommands, commandsToProgram)
from fontTools.ttLib import newTable
from fontTools import varLib
from fontTools.varLib.models import allEqual
+from fontTools.misc.psCharStrings import T2CharString, T2OutlineExtractor
+from fontTools.pens.t2CharStringPen import T2CharStringPen, t2c_round
-def addCFFVarStore(varFont, varModel):
- supports = varModel.supports[1:]
+def addCFFVarStore(varFont, varModel, varDataList, masterSupports):
fvarTable = varFont['fvar']
axisKeys = [axis.axisTag for axis in fvarTable.axes]
- varTupleList = varLib.builder.buildVarRegionList(supports, axisKeys)
- varTupleIndexes = list(range(len(supports)))
- varDeltasCFFV = varLib.builder.buildVarData(varTupleIndexes, None, False)
- varStoreCFFV = varLib.builder.buildVarStore(varTupleList, [varDeltasCFFV])
+ varTupleList = varLib.builder.buildVarRegionList(masterSupports, axisKeys)
+ varStoreCFFV = varLib.builder.buildVarStore(varTupleList, varDataList)
topDict = varFont['CFF2'].cff.topDictIndex[0]
topDict.VarStore = VarStoreData(otVarStore=varStoreCFFV)
@@ -143,16 +142,61 @@ pd_blend_fields = ("BlueValues", "OtherBlues", "FamilyBlues",
"StemSnapV")
-def merge_PrivateDicts(topDict, region_top_dicts, num_masters, var_model):
+def get_private(regionFDArrays, fd_index, ri, fd_map):
+ region_fdArray = regionFDArrays[ri]
+ region_fd_map = fd_map[fd_index]
+ if ri in region_fd_map:
+ region_fdIndex = region_fd_map[ri]
+ private = region_fdArray[region_fdIndex].Private
+ else:
+ private = None
+ return private
+
+
+def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):
+ """
+ Step through the FontDicts in the FDArray of the varfont TopDict.
+ For each varfont FontDict, step through each key in FontDict.Private.
+ For each key, step through each relevant source font Private dict and
+ build a list of values to blend.
+ The 'relevant' source fonts are selected by first getting the right
+ sub-model (and its key list) from vsindex_dict[vsindex]. The indices of
+ subModel.locations are mapped to source-font list indices by assuming
+ that the latter follow the same order as var_model.locations, so the
+ index of each subModel location can be looked up in var_model.locations.
+ """
+
+ topDict = top_dicts[0]
+ region_top_dicts = top_dicts[1:]
if hasattr(region_top_dicts[0], 'FDArray'):
regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts]
else:
regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts]
for fd_index, font_dict in enumerate(topDict.FDArray):
private_dict = font_dict.Private
- pds = [private_dict] + [
- regionFDArray[fd_index].Private for regionFDArray in regionFDArrays
- ]
+ vsindex = getattr(private_dict, 'vsindex', 0)
+ # At the moment, no PrivateDict has a vsindex key, but we support it
+ # here as it is meant to work. See the comment at the end of
+ # merge_charstrings() - the use of vsindex still needs to be optimized.
+ sub_model, model_keys = vsindex_dict[vsindex]
+ master_indices = []
+ for loc in sub_model.locations[1:]:
+ i = var_model.locations.index(loc) - 1
+ master_indices.append(i)
+ pds = [private_dict]
+ last_pd = private_dict
+ for ri in master_indices:
+ pd = get_private(regionFDArrays, fd_index, ri, fd_map)
+ # If the region font doesn't have this FontDict, just reference
+ # the last one used.
+ if pd is None:
+ pd = last_pd
+ else:
+ last_pd = pd
+ pds.append(pd)
+ num_masters = len(pds)
for key, value in private_dict.rawDict.items():
if key not in pd_blend_fields:
continue
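A small worked example of the location bookkeeping described in the docstring above, with purely illustrative locations: the sub-model's non-default locations are looked up in the full model's location list to find which region fonts feed the blend.

    full_locations = [{}, {'wght': 0.5}, {'wght': 1.0}]   # var_model.locations
    sub_locations = [{}, {'wght': 1.0}]                   # sub_model.locations
    master_indices = [full_locations.index(loc) - 1
                      for loc in sub_locations[1:]]
    # -> [1]: only the wght=1.0 region font contributes to this vsindex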
@@ -192,7 +236,7 @@ def merge_PrivateDicts(topDict, region_top_dicts, num_masters, var_model):
if (not any_points_differ) and not allEqual(rel_list):
any_points_differ = True
prev_val_list = val_list
- deltas = var_model.getDeltas(rel_list)
+ deltas = sub_model.getDeltas(rel_list)
# Convert numbers with no decimal part to an int.
deltas = [conv_to_int(delta) for delta in deltas]
# For PrivateDict BlueValues, the default font
@@ -206,61 +250,159 @@ def merge_PrivateDicts(topDict, region_top_dicts, num_masters, var_model):
else:
values = [pd.rawDict[key] for pd in pds]
if not allEqual(values):
- dataList = var_model.getDeltas(values)
+ dataList = sub_model.getDeltas(values)
else:
dataList = values[0]
private_dict.rawDict[key] = dataList
+def getfd_map(varFont, fonts_list):
+ """ Since a subset source font may have fewer FontDicts in their
+ """ Since a subset source font may have fewer FontDicts in its
+ FDArray than the default font, we have to match up the FontDicts in
+ the different fonts. We do this with the FDSelect array, and by
+ each source font. We return a mapping from fdIndex in the default
+ font to a dictionary which maps each master list index of each
+ region font to the equivalent fdIndex in the region font."""
+ fd_map = {}
+ default_font = fonts_list[0]
+ region_fonts = fonts_list[1:]
+ num_regions = len(region_fonts)
+ topDict = default_font['CFF '].cff.topDictIndex[0]
+ if not hasattr(topDict, 'FDSelect'):
+ fd_map[0] = [0]*num_regions
+ return fd_map
+
+ gname_mapping = {}
+ default_fdSelect = topDict.FDSelect
+ glyphOrder = default_font.getGlyphOrder()
+ for gid, fdIndex in enumerate(default_fdSelect):
+ gname_mapping[glyphOrder[gid]] = fdIndex
+ if fdIndex not in fd_map:
+ fd_map[fdIndex] = {}
+ for ri, region_font in enumerate(region_fonts):
+ region_glyphOrder = region_font.getGlyphOrder()
+ region_topDict = region_font['CFF '].cff.topDictIndex[0]
+ if not hasattr(region_topDict, 'FDSelect'):
+ # All the glyphs share the same FontDict. Pick any glyph.
+ default_fdIndex = gname_mapping[region_glyphOrder[0]]
+ fd_map[default_fdIndex][ri] = 0
+ else:
+ region_fdSelect = region_topDict.FDSelect
+ for gid, fdIndex in enumerate(region_fdSelect):
+ default_fdIndex = gname_mapping[region_glyphOrder[gid]]
+ region_map = fd_map[default_fdIndex]
+ if ri not in region_map:
+ region_map[ri] = fdIndex
+ return fd_map
+
+
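To make the structure returned by getfd_map() concrete, here is a hypothetical mapping for a default font with two FontDicts and two region masters, where the first region collapses everything into a single FontDict:

    fd_map = {
        0: {0: 0, 1: 0},  # default FDArray[0] -> region 0 FDArray[0], region 1 FDArray[0]
        1: {0: 0, 1: 1},  # default FDArray[1] -> region 0 FDArray[0], region 1 FDArray[1]
    }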
+CVarData = namedtuple('CVarData', 'varDataList masterSupports vsindex_dict')
def merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder):
topDict = varFont['CFF2'].cff.topDictIndex[0]
- default_charstrings = topDict.CharStrings
- region_fonts = ordered_fonts_list[1:]
- region_top_dicts = [
- ttFont['CFF '].cff.topDictIndex[0] for ttFont in region_fonts
- ]
+ top_dicts = [topDict] + [
+ ttFont['CFF '].cff.topDictIndex[0]
+ for ttFont in ordered_fonts_list[1:]
+ ]
num_masters = len(model.mapping)
- merge_PrivateDicts(topDict, region_top_dicts, num_masters, model)
- merge_charstrings(default_charstrings,
- glyphOrder,
- num_masters,
- region_top_dicts, model)
-
-
-def merge_charstrings(default_charstrings,
- glyphOrder,
- num_masters,
- region_top_dicts,
- var_model):
- for gname in glyphOrder:
- default_charstring = default_charstrings[gname]
+ cvData = merge_charstrings(glyphOrder, num_masters, top_dicts, model)
+ fd_map = getfd_map(varFont, ordered_fonts_list)
+ merge_PrivateDicts(top_dicts, cvData.vsindex_dict, model, fd_map)
+ addCFFVarStore(varFont, model, cvData.varDataList,
+ cvData.masterSupports)
+
+
+def _get_cs(charstrings, glyphName):
+ if glyphName not in charstrings:
+ return None
+ return charstrings[glyphName]
+
+
+def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):
+
+ vsindex_dict = {}
+ vsindex_by_key = {}
+ varDataList = []
+ masterSupports = []
+ default_charstrings = top_dicts[0].CharStrings
+ for gid, gname in enumerate(glyphOrder):
+ all_cs = [
+ _get_cs(td.CharStrings, gname)
+ for td in top_dicts]
+ if len([gs for gs in all_cs if gs is not None]) == 1:
+ continue
+ model, model_cs = masterModel.getSubModel(all_cs)
+ # create the first pass CFF2 charstring, from
+ # the default charstring.
+ default_charstring = model_cs[0]
var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)
- default_charstring.outlineExtractor = CFFToCFF2OutlineExtractor
+ # We need to override outlineExtractor because these
+ # charstrings do have widths in the 'program'; we need to drop these
+ # values rather than raise an assertion error for them.
+ default_charstring.outlineExtractor = MergeOutlineExtractor
default_charstring.draw(var_pen)
- for region_idx, region_td in enumerate(region_top_dicts, start=1):
- region_charstrings = region_td.CharStrings
- region_charstring = region_charstrings[gname]
+
+ # Add the coordinates from all the other regions to the
+ # blend lists in the CFF2 charstring.
+ region_cs = model_cs[1:]
+ for region_idx, region_charstring in enumerate(region_cs, start=1):
var_pen.restart(region_idx)
+ region_charstring.outlineExtractor = MergeOutlineExtractor
region_charstring.draw(var_pen)
- new_charstring = var_pen.getCharString(
+
+ # Collapse each coordinate list to a blend operator and its args.
+ new_cs = var_pen.getCharString(
private=default_charstring.private,
globalSubrs=default_charstring.globalSubrs,
- var_model=var_model, optimize=True)
- default_charstrings[gname] = new_charstring
+ var_model=model, optimize=True)
+ default_charstrings[gname] = new_cs
+
+ if (not var_pen.seen_moveto) or ('blend' not in new_cs.program):
+ # If this is not a marking glyph, or if there are no blend
+ # arguments, then we can use vsindex 0. No need to
+ # check if we need a new vsindex.
+ continue
+
+ # If the charstring required a new model, create
+ # a VarData table to go with, and set vsindex.
+ try:
+ key = tuple(v is not None for v in all_cs)
+ vsindex = vsindex_by_key[key]
+ except KeyError:
+ varTupleIndexes = []
+ for support in model.supports[1:]:
+ if support not in masterSupports:
+ masterSupports.append(support)
+ varTupleIndexes.append(masterSupports.index(support))
+ var_data = varLib.builder.buildVarData(varTupleIndexes, None, False)
+ vsindex = len(vsindex_dict)
+ vsindex_by_key[key] = vsindex
+ vsindex_dict[vsindex] = (model, [key])
+ varDataList.append(var_data)
+ # We do not need to check for an existing new_cs.private.vsindex,
+ # as we know it doesn't exist yet.
+ if vsindex != 0:
+ new_cs.program[:0] = [vsindex, 'vsindex']
+ cvData = CVarData(varDataList=varDataList, masterSupports=masterSupports,
+ vsindex_dict=vsindex_dict)
+ # XXX To do: optimize use of vsindex between the PrivateDicts and
+ # charstrings
+ return cvData
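A hedged sketch of the grouping step at the heart of merge_charstrings(): glyphs present in the same subset of masters share a vsindex, and therefore a VarData. The glyph names and coverage pattern below are purely illustrative.

    present_in_masters = {
        'A': (True, True, True),         # default plus both region masters
        'uni4E00': (True, False, True),  # sparse: missing from the first region
    }
    vsindex_by_key = {}
    for gname, key in present_in_masters.items():
        vsindex = vsindex_by_key.setdefault(key, len(vsindex_by_key))
        print(gname, '-> vsindex', vsindex)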
class MergeTypeError(TypeError):
def __init__(self, point_type, pt_index, m_index, default_type, glyphName):
- self.error_msg = [
- "In glyph '{gname}' "
- "'{point_type}' at point index {pt_index} in master "
- "index {m_index} differs from the default font point "
- "type '{default_type}'"
- "".format(gname=glyphName,
- point_type=point_type, pt_index=pt_index,
- m_index=m_index, default_type=default_type)
- ][0]
- super(MergeTypeError, self).__init__(self.error_msg)
+ self.error_msg = [
+ "In glyph '{gname}' "
+ "'{point_type}' at point index {pt_index} in master "
+ "index {m_index} differs from the default font point "
+ "type '{default_type}'"
+ "".format(
+ gname=glyphName,
+ point_type=point_type, pt_index=pt_index,
+ m_index=m_index, default_type=default_type)
+ ][0]
+ super(MergeTypeError, self).__init__(self.error_msg)
def makeRoundNumberFunc(tolerance):
@@ -274,10 +416,9 @@ def makeRoundNumberFunc(tolerance):
class CFFToCFF2OutlineExtractor(T2OutlineExtractor):
- """ This class is used to remove the initial width
- from the CFF charstring without adding the width
- to self.nominalWidthX, which is None.
- """
+ """ This class is used to remove the initial width from the CFF
+ charstring without trying to add the width to self.nominalWidthX,
+ which is None. """
def popallWidth(self, evenOdd=0):
args = self.popall()
if not self.gotWidth:
@@ -288,60 +429,127 @@ class CFFToCFF2OutlineExtractor(T2OutlineExtractor):
return args
+class MergeOutlineExtractor(CFFToCFF2OutlineExtractor):
+ """ Used to extract the charstring commands - including hints - from a
+ CFF charstring in order to merge it as another set of region data
+ into a CFF2 variable font charstring."""
+
+ def __init__(self, pen, localSubrs, globalSubrs,
+ nominalWidthX, defaultWidthX, private=None):
+ super(CFFToCFF2OutlineExtractor, self).__init__(pen, localSubrs,
+ globalSubrs, nominalWidthX, defaultWidthX, private)
+
+ def countHints(self):
+ args = self.popallWidth()
+ self.hintCount = self.hintCount + len(args) // 2
+ return args
+
+ def _hint_op(self, type, args):
+ self.pen.add_hint(type, args)
+
+ def op_hstem(self, index):
+ args = self.countHints()
+ self._hint_op('hstem', args)
+
+ def op_vstem(self, index):
+ args = self.countHints()
+ self._hint_op('vstem', args)
+
+ def op_hstemhm(self, index):
+ args = self.countHints()
+ self._hint_op('hstemhm', args)
+
+ def op_vstemhm(self, index):
+ args = self.countHints()
+ self._hint_op('vstemhm', args)
+
+ def _get_hintmask(self, index):
+ if not self.hintMaskBytes:
+ args = self.countHints()
+ if args:
+ self._hint_op('vstemhm', args)
+ self.hintMaskBytes = (self.hintCount + 7) // 8
+ hintMaskBytes, index = self.callingStack[-1].getBytes(index,
+ self.hintMaskBytes)
+ return index, hintMaskBytes
+
+ def op_hintmask(self, index):
+ index, hintMaskBytes = self._get_hintmask(index)
+ self.pen.add_hintmask('hintmask', [hintMaskBytes])
+ return hintMaskBytes, index
+
+ def op_cntrmask(self, index):
+ index, hintMaskBytes = self._get_hintmask(index)
+ self.pen.add_hintmask('cntrmask', [hintMaskBytes])
+ return hintMaskBytes, index
+
+
class CFF2CharStringMergePen(T2CharStringPen):
"""Pen to merge Type 2 CharStrings.
"""
- def __init__(self, default_commands,
- glyphName, num_masters, master_idx, roundTolerance=0.5):
+ def __init__(
+ self, default_commands, glyphName, num_masters, master_idx,
+ roundTolerance=0.5):
super(
CFF2CharStringMergePen,
- self).__init__(width=None,
- glyphSet=None, CFF2=True,
- roundTolerance=roundTolerance)
+ self).__init__(
+ width=None,
+ glyphSet=None, CFF2=True,
+ roundTolerance=roundTolerance)
self.pt_index = 0
self._commands = default_commands
self.m_index = master_idx
self.num_masters = num_masters
self.prev_move_idx = 0
+ self.seen_moveto = False
self.glyphName = glyphName
self.roundNumber = makeRoundNumberFunc(roundTolerance)
- def _p(self, pt):
- """ Unlike T2CharstringPen, this class stores absolute values.
- This is to allow the logic in check_and_fix_closepath() to work,
- where the current or previous absolute point has to be compared to
- the path start-point.
- """
- self._p0 = pt
- return list(self._p0)
-
def add_point(self, point_type, pt_coords):
if self.m_index == 0:
self._commands.append([point_type, [pt_coords]])
else:
cmd = self._commands[self.pt_index]
if cmd[0] != point_type:
- # Fix some issues that show up in some
- # CFF workflows, even when fonts are
- # topologically merge compatible.
- success, pt_coords = self.check_and_fix_flat_curve(
- cmd, point_type, pt_coords)
- if not success:
- success = self.check_and_fix_closepath(
- cmd, point_type, pt_coords)
- if success:
- # We may have incremented self.pt_index
- cmd = self._commands[self.pt_index]
- if cmd[0] != point_type:
- success = False
- if not success:
- raise MergeTypeError(point_type,
- self.pt_index, len(cmd[1]),
- cmd[0], self.glyphName)
+ raise MergeTypeError(
+ point_type,
+ self.pt_index, len(cmd[1]),
+ cmd[0], self.glyphName)
cmd[1].append(pt_coords)
self.pt_index += 1
+ def add_hint(self, hint_type, args):
+ if self.m_index == 0:
+ self._commands.append([hint_type, [args]])
+ else:
+ cmd = self._commands[self.pt_index]
+ if cmd[0] != hint_type:
+ raise MergeTypeError(hint_type, self.pt_index, len(cmd[1]),
+ cmd[0], self.glyphName)
+ cmd[1].append(args)
+ self.pt_index += 1
+
+ def add_hintmask(self, hint_type, abs_args):
+ # For hintmask, fonttools.cffLib.specializer.py expects
+ # each of these to be represented by two sequential commands:
+ # first holding only the operator name, with an empty arg list,
+ # second with an empty string as the op name, and the mask arg list.
+ if self.m_index == 0:
+ self._commands.append([hint_type, []])
+ self._commands.append(["", [abs_args]])
+ else:
+ cmd = self._commands[self.pt_index]
+ if cmd[0] != hint_type:
+ raise MergeTypeError(hint_type, self.pt_index, len(cmd[1]),
+ cmd[0], self.glyphName)
+ self.pt_index += 1
+ cmd = self._commands[self.pt_index]
+ cmd[1].append(abs_args)
+ self.pt_index += 1
+
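As the comment in add_hintmask() notes, a mask is carried as two consecutive commands so that cffLib.specializer can handle it. For a single (default) master the command list would hold something like the following; the mask byte is illustrative:

    commands = [
        ['hintmask', []],    # operator name only, empty argument list
        ['', [[b'\xf0']]],   # empty op name; each master appends its mask bytes here
    ]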
def _moveTo(self, pt):
+ if not self.seen_moveto:
+ self.seen_moveto = True
pt_coords = self._p(pt)
self.add_point('rmoveto', pt_coords)
# I set prev_move_idx here because add_point()
@@ -371,7 +579,7 @@ class CFF2CharStringMergePen(T2CharStringPen):
def getCommands(self):
return self._commands
- def reorder_blend_args(self, commands):
+ def reorder_blend_args(self, commands, get_delta_func, round_func):
"""
We first re-order the master coordinate values.
For a moveto to lineto, the args are now arranged as:
@@ -380,9 +588,13 @@ class CFF2CharStringMergePen(T2CharStringPen):
[ [master_0 x, master_1 x, master_2 x],
[master_0 y, master_1 y, master_2 y]
]
- We also make the value relative.
If the master values are all the same, we collapse the list to
a single value instead of a list.
+
+ We then convert this to:
+ [ [master_0 x] + [x delta tuple] + [numBlends=1]
+ [master_0 y] + [y delta tuple] + [numBlends=1]
+ ]
"""
for cmd in commands:
# arg[i] is the set of arguments for this operator from master i.
@@ -390,113 +602,46 @@ class CFF2CharStringMergePen(T2CharStringPen):
m_args = zip(*args)
# m_args[n] is now all num_master args for the i'th argument
# for this operation.
- cmd[1] = m_args
-
- # Now convert from absolute to relative
- x0 = [0]*self.num_masters
- y0 = [0]*self.num_masters
- for cmd in self._commands:
- is_x = True
- coords = cmd[1]
- rel_coords = []
- for coord in coords:
- prev_coord = x0 if is_x else y0
- rel_coord = [pt[0] - pt[1] for pt in zip(coord, prev_coord)]
-
- if allEqual(rel_coord):
- rel_coord = rel_coord[0]
- rel_coords.append(rel_coord)
- if is_x:
- x0 = coord
- else:
- y0 = coord
- is_x = not is_x
- cmd[1] = rel_coords
- return commands
-
- @staticmethod
- def mergeCommandsToProgram(commands, var_model, round_func):
- """
- Takes a commands list as returned by programToCommands() and
- converts it back to a T2CharString or CFF2Charstring program list. I
- need to use this rather than specialize.commandsToProgram, as the
- commands produced by CFF2CharStringMergePen initially contains a
- list of coordinate values, one for each master, wherever a single
- coordinate value is expected by the regular logic. The problem with
- doing using the specialize.py functions is that a commands list is
- expected to be a op name with its associated argument list. For the
- commands list here, some of the arguments may need to be converted
- to a new argument list and opcode.
- This version will convert each list of master arguments to a blend
- op and its arguments, and will also combine successive blend ops up
- to the stack limit.
- """
- program = []
- for op, args in commands:
- num_args = len(args)
- # some of the args may be blend lists, and some may be
- # single coordinate values.
- i = 0
- stack_use = 0
- while i < num_args:
- arg = args[i]
- if not isinstance(arg, list):
- program.append(arg)
- i += 1
- stack_use += 1
- else:
- prev_stack_use = stack_use
- """ The arg is a tuple of blend values.
- These are each (master 0,master 1..master n)
- Combine as many successive tuples as we can,
- up to the max stack limit.
- """
- num_masters = len(arg)
- blendlist = [arg]
- i += 1
- stack_use += 1 + num_masters # 1 for the num_blends arg
- while (i < num_args) and isinstance(args[i], list):
- blendlist.append(args[i])
- i += 1
- stack_use += num_masters
- if stack_use + num_masters > maxStackLimit:
- # if we are here, max stack is is the CFF2 max stack.
- break
- num_blends = len(blendlist)
- # append the 'num_blends' default font values
- for arg in blendlist:
- if round_func:
- arg[0] = round_func(arg[0])
- program.append(arg[0])
- for arg in blendlist:
- # for each coordinate tuple, append the region deltas
- if len(arg) != 3:
- print(arg)
- import pdb
- pdb.set_trace()
- deltas = var_model.getDeltas(arg)
+ cmd[1] = list(m_args)
+ lastOp = None
+ for cmd in commands:
+ op = cmd[0]
+ # masks are represented by two commands: the first has only the op name,
+ # the second has only the args.
+ if lastOp in ['hintmask', 'cntrmask']:
+ coord = list(cmd[1])
+ assert allEqual(coord), (
+ "hintmask values cannot differ between source fonts.")
+ cmd[1] = [coord[0][0]]
+ else:
+ coords = cmd[1]
+ new_coords = []
+ for coord in coords:
+ if allEqual(coord):
+ new_coords.append(coord[0])
+ else:
+ # convert to deltas
+ deltas = get_delta_func(coord)[1:]
if round_func:
deltas = [round_func(delta) for delta in deltas]
- # First item in 'deltas' is the default master value;
- # for CFF2 data, that has already been written.
- program.extend(deltas[1:])
- program.append(num_blends)
- program.append('blend')
- stack_use = prev_stack_use + num_blends
- if op:
- program.append(op)
- return program
-
-
- def getCharString(self, private=None, globalSubrs=None,
- var_model=None, optimize=True):
+ coord = [coord[0]] + deltas
+ new_coords.append(coord)
+ cmd[1] = new_coords
+ lastOp = op
+ return commands
+
+ def getCharString(
+ self, private=None, globalSubrs=None,
+ var_model=None, optimize=True):
commands = self._commands
- commands = self.reorder_blend_args(commands)
+ commands = self.reorder_blend_args(commands, var_model.getDeltas,
+ self.roundNumber)
if optimize:
- commands = specializeCommands(commands, generalizeFirst=False,
- maxstack=maxStackLimit)
- program = self.mergeCommandsToProgram(commands, var_model=var_model,
- round_func=self.roundNumber)
- charString = T2CharString(program=program, private=private,
- globalSubrs=globalSubrs)
+ commands = specializeCommands(
+ commands, generalizeFirst=False,
+ maxstack=maxStackLimit)
+ program = commandsToProgram(commands)
+ charString = T2CharString(
+ program=program, private=private,
+ globalSubrs=globalSubrs)
return charString
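A worked toy example of the per-argument conversion now performed by reorder_blend_args(): each coordinate's list of master values is either collapsed (when all masters agree) or rewritten as the default value followed by the region deltas that a blend operator will consume. VariationModel is the regular fontTools model; the numbers are illustrative.

    from fontTools.varLib.models import VariationModel

    model = VariationModel([{}, {'wght': 1.0}])  # default plus one region master
    master_values = [100, 140]                   # one coordinate in each master
    deltas = model.getDeltas(master_values)      # default value, then region delta: 100, 40
    blend_arg = [master_values[0]] + deltas[1:]  # what the blend operator consumes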
diff --git a/Lib/fontTools/varLib/mutator.py b/Lib/fontTools/varLib/mutator.py
index de612365..04ab3577 100644
--- a/Lib/fontTools/varLib/mutator.py
+++ b/Lib/fontTools/varLib/mutator.py
@@ -65,24 +65,29 @@ def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
charstrings = topDict.CharStrings
for gname in glyphOrder:
# Interpolate charstring
+ # e.g. replace blend op args with regular args,
+ # and consume and discard any vsindex op.
charstring = charstrings[gname]
- pd = charstring.private
- vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
- num_regions = pd.getNumRegions(vsindex)
- numMasters = num_regions + 1
new_program = []
+ vsindex = 0
last_i = 0
for i, token in enumerate(charstring.program):
- if token == 'blend':
+ if token == 'vsindex':
+ vsindex = charstring.program[i - 1]
+ if last_i != 0:
+ new_program.extend(charstring.program[last_i:i - 1])
+ last_i = i + 1
+ elif token == 'blend':
+ num_regions = charstring.getNumRegions(vsindex)
+ numMasters = 1 + num_regions
num_args = charstring.program[i - 1]
- """ The stack is now:
- ..args for following operations
- num_args values from the default font
- num_args tuples, each with numMasters-1 delta values
- num_blend_args
- 'blend'
- """
- argi = i - (num_args*numMasters + 1)
+ # The program list starting at program[i] is now:
+ # ..args for following operations
+ # num_args values from the default font
+ # num_args tuples, each with numMasters-1 delta values
+ # num_blend_args
+ # 'blend'
+ argi = i - (num_args * numMasters + 1)
end_args = tuplei = argi + num_args
while argi < end_args:
next_ti = tuplei + num_regions
diff --git a/Lib/fontTools/varLib/varStore.py b/Lib/fontTools/varLib/varStore.py
index 66d0c95a..f8ce8199 100644
--- a/Lib/fontTools/varLib/varStore.py
+++ b/Lib/fontTools/varLib/varStore.py
@@ -187,8 +187,10 @@ class VarStoreInstancer(object):
#
# Optimizations
#
+# retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed
+# advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow.
-def VarStore_subset_varidxes(self, varIdxes, optimize=True):
+def VarStore_subset_varidxes(self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set()):
# Sort out used varIdxes by major/minor.
used = {}
@@ -217,10 +219,19 @@ def VarStore_subset_varidxes(self, varIdxes, optimize=True):
items = data.Item
newItems = []
- for minor in sorted(usedMinors):
- newMinor = len(newItems)
- newItems.append(items[minor])
- varDataMap[(major<<16)+minor] = (newMajor<<16)+newMinor
+ if major == 0 and retainFirstMap:
+ for minor in range(len(items)):
+ newItems.append(items[minor] if minor in usedMinors else [0] * len(items[minor]))
+ varDataMap[minor] = minor
+ else:
+ if major == 0:
+ minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)
+ else:
+ minors = sorted(usedMinors)
+ for minor in minors:
+ newMinor = len(newItems)
+ newItems.append(items[minor])
+ varDataMap[(major<<16)+minor] = (newMajor<<16)+newMinor
data.Item = newItems
data.ItemCount = len(data.Item)
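The varIdxes consumed by this helper, and the keys of the varDataMap it builds, pack the VarData index and the row index into one integer, as the (major<<16)+minor expressions above show. A tiny sketch of that packing, with illustrative helper names:

    def pack_varidx(major, minor):
        # 'major' selects the VarData subtable, 'minor' the row within it.
        return (major << 16) + minor

    def unpack_varidx(varidx):
        return varidx >> 16, varidx & 0xFFFF

    assert unpack_varidx(pack_varidx(0, 5)) == (0, 5)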
diff --git a/Lib/fontTools/voltLib/lexer.py b/Lib/fontTools/voltLib/lexer.py
index 271fe3b0..53102b99 100644
--- a/Lib/fontTools/voltLib/lexer.py
+++ b/Lib/fontTools/voltLib/lexer.py
@@ -39,10 +39,13 @@ class Lexer(object):
if token_type not in {Lexer.NEWLINE}:
return (token_type, token, location)
+ def location_(self):
+ column = self.pos_ - self.line_start_ + 1
+ return (self.filename_ or "<volt>", self.line_, column)
+
def next_(self):
self.scan_over_(Lexer.CHAR_WHITESPACE_)
- column = self.pos_ - self.line_start_ + 1
- location = (self.filename_, self.line_, column)
+ location = self.location_()
start = self.pos_
text = self.text_
limit = len(text)
diff --git a/Lib/fontTools/voltLib/parser.py b/Lib/fontTools/voltLib/parser.py
index 4fe10a0e..df035aa6 100644
--- a/Lib/fontTools/voltLib/parser.py
+++ b/Lib/fontTools/voltLib/parser.py
@@ -32,10 +32,19 @@ class Parser(object):
self.lookups_ = SymbolTable()
self.next_token_type_, self.next_token_ = (None, None)
self.next_token_location_ = None
- with open(path, "r") as f:
- self.lexer_ = Lexer(f.read(), path)
+ self.make_lexer_(path)
self.advance_lexer_()
+ def make_lexer_(self, file_or_path):
+ if hasattr(file_or_path, "read"):
+ filename = getattr(file_or_path, "name", None)
+ data = file_or_path.read()
+ else:
+ filename = file_or_path
+ with open(file_or_path, "r") as f:
+ data = f.read()
+ self.lexer_ = Lexer(data, filename)
+
def parse(self):
statements = self.doc_.statements
while self.next_token_type_ is not None:
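A hedged usage sketch of the new file-like support: Parser now accepts either a filesystem path or any object with a read() method. The one-glyph VOLT fragment is only illustrative.

    from io import StringIO
    from fontTools.voltLib.parser import Parser

    volt_source = StringIO('DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH\n')
    doc = Parser(volt_source).parse()
    print(len(doc.statements))  # -> 1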
diff --git a/Lib/fonttools.egg-info/PKG-INFO b/Lib/fonttools.egg-info/PKG-INFO
index d57498f3..d64332fc 100644
--- a/Lib/fonttools.egg-info/PKG-INFO
+++ b/Lib/fonttools.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: fonttools
-Version: 3.39.0
+Version: 3.41.2
Summary: Tools to manipulate font files
Home-page: http://github.com/fonttools/fonttools
Author: Just van Rossum
@@ -373,15 +373,15 @@ Description: |Travis Build Status| |Appveyor Build status| |Coverage Status| |Py
Olivier Berten, Samyak Bhuta, Erik van Blokland, Petr van Blokland,
Jelle Bosma, Sascha Brawer, Tom Byrer, Frédéric Coiffier, Vincent
- Connare, Dave Crossland, Simon Daniels, Behdad Esfahbod, Behnam
- Esfahbod, Hannes Famira, Sam Fishman, Matt Fontaine, Yannis Haralambous,
- Greg Hitchcock, Jeremie Hornus, Khaled Hosny, John Hudson, Denis Moyogo
- Jacquerye, Jack Jansen, Tom Kacvinsky, Jens Kutilek, Antoine Leca,
- Werner Lemberg, Tal Leming, Peter Lofting, Cosimo Lupo, Masaya Nakamura,
- Dave Opstad, Laurence Penney, Roozbeh Pournader, Garret Rieger, Read
- Roberts, Guido van Rossum, Just van Rossum, Andreas Seidel, Georg
- Seifert, Miguel Sousa, Adam Twardoch, Adrien Tétar, Vitaly Volkov, Paul
- Wise.
+ Connare, Dave Crossland, Simon Daniels, Peter Dekkers, Behdad Esfahbod,
+ Behnam Esfahbod, Hannes Famira, Sam Fishman, Matt Fontaine, Yannis
+ Haralambous, Greg Hitchcock, Jeremie Hornus, Khaled Hosny, John Hudson,
+ Denis Moyogo Jacquerye, Jack Jansen, Tom Kacvinsky, Jens Kutilek,
+ Antoine Leca, Werner Lemberg, Tal Leming, Peter Lofting, Cosimo Lupo,
+ Masaya Nakamura, Dave Opstad, Laurence Penney, Roozbeh Pournader, Garret
+ Rieger, Read Roberts, Guido van Rossum, Just van Rossum, Andreas Seidel,
+ Georg Seifert, Miguel Sousa, Adam Twardoch, Adrien Tétar, Vitaly Volkov,
+ Paul Wise.
Copyrights
~~~~~~~~~~
@@ -415,6 +415,64 @@ Description: |Travis Build Status| |Appveyor Build status| |Coverage Status| |Py
Changelog
~~~~~~~~~
+ 3.41.2 (released 2019-05-13)
+ ----------------------------
+
+ - [cffLib] Fixed issue when importing a ``CFF2`` variable font from XML, whereby
+ the VarStore state was not propagated to PrivateDict (#1598).
+ - [varLib] Don't drop ``post`` glyph names when building CFF2 variable font (#1609).
+
+
+ 3.41.1 (released 2019-05-13)
+ ----------------------------
+
+ - [designspaceLib] Added ``loadSourceFonts`` method to load source fonts using
+ custom opener function (#1606).
+ - [head] Round font bounding box coordinates to integers to fix compile error
+ if CFF font has float coordinates (#1604, #1605).
+ - [feaLib] Don't write ``None`` in ``ast.ValueRecord.asFea()`` (#1599).
+ - [subset] Fixed ``AssertionError`` raised when using the ``--desubroutinize`` option
+ (#1590, #1594).
+ - [graphite] Fixed bug in ``Silf`` table's ``decompile`` method unmasked by
+ previous typo fix (#1597). Decode language code as UTF-8 in ``Sill`` table's
+ ``decompile`` method (#1600).
+
+ 3.41.0 (released 2019-04-29)
+ ----------------------------
+
+ - [varLib/cffLib] Added support for building ``CFF2`` variable font from sparse
+ masters, or masters with more than one model (multiple ``VarStore.VarData``).
+ In ``cffLib.specializer``, added support for ``CFF2`` CharStrings with
+ ``blend`` operators (#1547, #1591).
+ - [subset] Fixed subsetting ``HVAR`` and ``VVAR`` with ``--retain-gids`` option,
+ and when the advance mapping is null while the sidebearing mappings are non-null
+ (#1587, #1588).
+ - Added ``otlLib.maxContextCalc`` module to compute ``OS/2.usMaxContext`` value.
+ Calculate it automatically when compiling features with feaLib. Added option
+ ``--recalc-max-context`` to ``subset`` module (#1582).
+ - [otBase/otTables] Fixed ``AttributeError`` on missing OT table fields after
+ importing font from TTX (#1584).
+ - [graphite] Fixed typo in ``Silf`` table's ``decompile`` method (#1586).
+ - [otlLib] Better compress ``GPOS`` SinglePos (LookupType 1) subtables (#1539).
+
+ 3.40.0 (released 2019-04-08)
+ ----------------------------
+
+ - [subset] Fixed error while subsetting ``VVAR`` with ``--retain-gids``
+ option (#1552).
+ - [designspaceLib] Use up-to-date default location in ``findDefault`` method
+ (#1554).
+ - [voltLib] Allow passing file-like object to Parser.
+ - [arrayTools/glyf] ``calcIntBounds`` (used to compute bounding boxes of glyf
+ table's glyphs) now uses ``otRound`` instead of ``round3`` (#1566).
+ - [svgLib] Added support for converting more SVG shapes to path ``d`` strings
+ (ellipse, line, polyline), as well as support for ``transform`` attributes.
+ Only ``matrix`` transformations are currently supported (#1564, #1564).
+ - [varLib] Added support for building ``VVAR`` table from ``vmtx`` and ``VORG``
+ tables (#1551).
+ - [fontBuilder] Enable making CFF2 fonts with ``post`` table format 2 (#1557).
+ - Fixed ``DeprecationWarning`` on invalid escape sequences (#1562).
+
3.39.0 (released 2019-03-19)
----------------------------
@@ -1684,13 +1742,13 @@ Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Text Processing :: Fonts
Classifier: Topic :: Multimedia :: Graphics
Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion
-Provides-Extra: symfont
-Provides-Extra: type1
Provides-Extra: lxml
-Provides-Extra: ufo
-Provides-Extra: interpolatable
Provides-Extra: all
Provides-Extra: woff
-Provides-Extra: plot
Provides-Extra: unicode
+Provides-Extra: interpolatable
+Provides-Extra: plot
+Provides-Extra: type1
+Provides-Extra: ufo
+Provides-Extra: symfont
Provides-Extra: graphite
diff --git a/Lib/fonttools.egg-info/SOURCES.txt b/Lib/fonttools.egg-info/SOURCES.txt
index 3bb752fa..4ff321a1 100644
--- a/Lib/fonttools.egg-info/SOURCES.txt
+++ b/Lib/fonttools.egg-info/SOURCES.txt
@@ -137,6 +137,7 @@ Lib/fontTools/mtiLib/__init__.py
Lib/fontTools/mtiLib/__main__.py
Lib/fontTools/otlLib/__init__.py
Lib/fontTools/otlLib/builder.py
+Lib/fontTools/otlLib/maxContextCalc.py
Lib/fontTools/pens/__init__.py
Lib/fontTools/pens/areaPen.py
Lib/fontTools/pens/basePen.py
@@ -336,7 +337,8 @@ Tests/afmLib/afmLib_test.py
Tests/afmLib/data/TestAFM.afm
Tests/cffLib/cffLib_test.py
Tests/cffLib/specializer_test.py
-Tests/cffLib/data/TestOTF.otf
+Tests/cffLib/data/TestOTF.ttx
+Tests/cffLib/data/TestSparseCFF2VF.ttx
Tests/designspaceLib/designspace_test.py
Tests/designspaceLib/data/test.designspace
Tests/encodings/codecs_test.py
@@ -630,6 +632,11 @@ Tests/mtiLib/data/mti/scripttable.ttx.GPOS
Tests/mtiLib/data/mti/scripttable.ttx.GSUB
Tests/mtiLib/data/mti/scripttable.txt
Tests/otlLib/builder_test.py
+Tests/otlLib/maxContextCalc_test.py
+Tests/otlLib/data/gpos_91.ttx
+Tests/otlLib/data/gsub_51.ttx
+Tests/otlLib/data/gsub_52.ttx
+Tests/otlLib/data/gsub_71.ttx
Tests/pens/areaPen_test.py
Tests/pens/basePen_test.py
Tests/pens/boundsPen_test.py
@@ -651,6 +658,7 @@ Tests/subset/data/TestBSLN-3.ttx
Tests/subset/data/TestCID-Regular.ttx
Tests/subset/data/TestCLR-Regular.ttx
Tests/subset/data/TestGVAR.ttx
+Tests/subset/data/TestHVVAR.ttx
Tests/subset/data/TestLCAR-0.ttx
Tests/subset/data/TestLCAR-1.ttx
Tests/subset/data/TestMATH-Regular.ttx
@@ -660,6 +668,8 @@ Tests/subset/data/TestOTF-Regular.ttx
Tests/subset/data/TestPROP.ttx
Tests/subset/data/TestTTF-Regular.ttx
Tests/subset/data/TestTTF-Regular_non_BMP_char.ttx
+Tests/subset/data/expect_HVVAR.ttx
+Tests/subset/data/expect_HVVAR_retain_gids.ttx
Tests/subset/data/expect_ankr.ttx
Tests/subset/data/expect_bsln_0.ttx
Tests/subset/data/expect_bsln_1.ttx
@@ -684,6 +694,8 @@ Tests/subset/data/expect_opbd_1.ttx
Tests/subset/data/expect_prop_0.ttx
Tests/subset/data/expect_prop_1.ttx
Tests/subset/data/google_color.ttx
+Tests/subset/data/test_cntrmask_CFF.desub.ttx
+Tests/subset/data/test_cntrmask_CFF.ttx
Tests/subset/data/test_hinted_subrs_CFF.desub.ttx
Tests/subset/data/test_hinted_subrs_CFF.ttx
Tests/svgLib/path/__init__.py
@@ -1432,10 +1444,23 @@ Tests/varLib/data/InterpolateLayout2.designspace
Tests/varLib/data/InterpolateLayout3.designspace
Tests/varLib/data/SparseMasters.designspace
Tests/varLib/data/TestCFF2.designspace
-Tests/varLib/data/TestCFF2VF.otf
-Tests/varLib/data/master_cff2/TestCFF2_Black.otf
-Tests/varLib/data/master_cff2/TestCFF2_ExtraLight.otf
-Tests/varLib/data/master_cff2/TestCFF2_Regular.otf
+Tests/varLib/data/TestSparseCFF2VF.designspace
+Tests/varLib/data/TestVVAR.designspace
+Tests/varLib/data/master_cff2/TestCFF2_Black.ttx
+Tests/varLib/data/master_cff2/TestCFF2_ExtraLight.ttx
+Tests/varLib/data/master_cff2/TestCFF2_Regular.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w0.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w1000.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w439.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w440.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w599.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w600.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w669.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w670.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w799.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w800.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w889.00.ttx
+Tests/varLib/data/master_sparse_cff2/MasterSet_Kanji-w890.00.ttx
Tests/varLib/data/master_ttx_getvar_ttf/Mutator_Getvar.ttx
Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master0.ttx
Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master1.ttx
@@ -1459,6 +1484,7 @@ Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Regular.ttx
Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-SemiBold.ttx
Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily4-Italic15.ttx
Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily4-Regular.ttx
+Tests/varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx
Tests/varLib/data/master_ttx_varfont_ttf/Mutator_IUP.ttx
Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/features.fea
Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/fontinfo.plist
@@ -1721,6 +1747,8 @@ Tests/varLib/data/master_ufo/TestFamily4-Regular.ufo/glyphs.public.background/co
Tests/varLib/data/master_ufo/TestFamily4-Regular.ufo/glyphs.public.background/dieresiscomb.glif
Tests/varLib/data/master_ufo/TestFamily4-Regular.ufo/glyphs.public.background/n.glif
Tests/varLib/data/master_ufo/TestFamily4-Regular.ufo/glyphs.public.background/o.glif
+Tests/varLib/data/master_vvar_cff2/TestVVAR.0.ttx
+Tests/varLib/data/master_vvar_cff2/TestVVAR.1.ttx
Tests/varLib/data/test_results/Build.ttx
Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx
Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx
@@ -1757,5 +1785,7 @@ Tests/varLib/data/test_results/Mutator.ttx
Tests/varLib/data/test_results/Mutator_Getvar-instance.ttx
Tests/varLib/data/test_results/Mutator_IUP-instance.ttx
Tests/varLib/data/test_results/SparseMasters.ttx
+Tests/varLib/data/test_results/TestSparseCFF2VF.ttx
+Tests/varLib/data/test_results/TestVVAR.ttx
Tests/voltLib/lexer_test.py
Tests/voltLib/parser_test.py
\ No newline at end of file